Skip to content
Snippets Groups Projects
Commit 432d5c06 authored by burnout87's avatar burnout87
Browse files

using dedicated function for cleaning source title, removed plugin requirement

parent 8394de93
No related branches found
No related tags found
No related merge requests found
%% Cell type:code id:f42afc40 tags:parameters
``` python
import os,sys
# Papermill "parameters" cell: every value below may be overridden at batch run time.
rev_num = 0  # revolution number; <= 0 means "current revolution + this offset"
use_isgri = True
use_jemx1 = True
use_jemx2 = True
E1_keV = "28.0"  # ISGRI energy band, keV (strings, as expected by the dispatcher)
E2_keV = "40.0"
J_E1_keV = "3.0"  # JEM-X energy band, keV
J_E2_keV = "20.0"
osa_version = 'OSA11.2'
detection_threshold = 7  # sigma threshold used for source detection
host_type = 'staging'  # which dispatcher deployment to talk to
isgri_systematic_fraction = 0.015  # fractional systematic error added to ISGRI LCs
jemx_systematic_fraction = 0.05  # fractional systematic error added to JEM-X LCs
lc_time_bin = 3000  # light-curve time bin in seconds
data_version = 'CONS'  # 'CONS' (consolidated) or 'NRT' (near-real-time)
output_yaml_dir = 'rev' # A folder to store the yaml file
token=''  # ODA token; auto-discovered below when left empty
batch_run = False  # True when executed via papermill (affects which input cell is archived)
notebooks_folder = os.getcwd()
```
%% Cell type:code id:f2f901c0 tags:
``` python
import sys
import oda_integral_wrapper.wrapper
from oda_api.plot_tools import OdaLightCurve
```
%% Cell type:code id:d2508e02 tags:
``` python
import oda_integral_wrapper.itime as itime

# A non-positive rev_num is interpreted as an offset from the current
# revolution; in that case NRT data are forced and JEM-X2 is disabled.
if rev_num > 0:
    rev_num = '%04d' % int(rev_num)
else:
    current_time = itime.now()
    rev_num = '%04d' % (int(current_time.REVNUM) + rev_num)
    data_version = 'NRT'
    use_jemx2 = False
    print("We force NRT data and no use of JEM-X2")
print("Revolution ", rev_num)
```
%% Cell type:code id:97950e40 tags:
``` python
import logging

# Verbose (INFO) logging on the root logger; switch to WARNING for quiet runs.
#logging.getLogger().setLevel(logging.WARNING)
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.addHandler(logging.StreamHandler())
```
%% Cell type:code id:0ba01826 tags:
``` python
import oda_api.token

# When no token was supplied as a parameter, try the standard discovery
# locations; decode it to show its claims (expiry, roles, ...).
if token == '':
    token = oda_api.token.discover_token()
oda_api.token.decode_oda_token(token)
```
%% Cell type:code id:c2c1b193 tags:
``` python
import yaml


def _load_observations_from_yaml(path, rev):
    """Load the observations list from the local rev yaml file.

    Raises a descriptive Exception when the file is missing or unreadable,
    since the light-curve processing cannot continue without it.
    """
    try:
        with open(path) as file:
            return yaml.load(file, Loader=yaml.FullLoader)
    except Exception:
        raise Exception('The file rev_%s.yaml must exist to process the LCs' % rev)


wrap = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
                                                    host_type=host_type)
#print(wrap.disp.url)
observation_title = "rev. " + rev_num
yaml_file_path = os.path.join('rev', 'rev_%s.yaml' % rev_num)
#print(observation_title)
# Prefer the yaml attached to the gallery observation; fall back to the
# local file when the gallery has none or the request fails.
# (The previously duplicated file-loading try/except blocks are now a helper.)
try:
    output_get = wrap.disp.get_yaml_files_observation_with_title(observation_title=observation_title, token=token)
    observations = yaml.safe_load(output_get['file_content'])
    if output_get['file_content'] == '':
        observations = _load_observations_from_yaml(yaml_file_path, rev_num)
except Exception:
    observations = _load_observations_from_yaml(yaml_file_path, rev_num)
```
%% Cell type:code id:47014b40 tags:
``` python
# Normalise every science-window entry to the zero-padded fixed-width
# "%016.3f" representation expected by the dispatcher.
for source in observations:
    formatted = ["%016.3f" % (float(entry)) for entry in source['scw_list']]
    source['scw_list'] = formatted
    print(source['title'], formatted)
#print(observations)
```
%% Cell type:code id:f02e1c5b tags:
``` python
import astropy.io.fits as fits
import numpy as np
from astroquery.simbad import Simbad
from astropy import units as u
from astropy.coordinates import SkyCoord
import copy
from importlib import reload
import json
```
%% Cell type:code id:8b5d064b tags:
``` python
## It does not work from papermill
# import ipynbname
# nb_fname = ipynbname.name()
nb_fname = "Generic Revolution LC.ipynb"
# Derive the "produced_by" link from the local git checkout; fall back to
# the canonical gallery-notebooks URL when that is not possible.
try:
    # Import inside the try so a missing gitpython package also triggers
    # the fallback instead of crashing the notebook.
    from git import Repo
    repo = Repo(notebooks_folder)
    repo_name = repo.remotes.origin.url.split('.git')[0]
    origin_notebook = repo_name.replace(':', '/').replace('git@', 'https://') + \
        '/-/blob/master/' + nb_fname
except Exception:
    origin_notebook = 'https://gitlab.astro.unige.ch/oda/product-gallery/gallery-notebooks/-/blob/master/' + nb_fname
print(origin_notebook)
```
%% Cell type:code id:bc873b69 tags:
``` python
# Re-create the wrapper after reloading the module, so local code changes
# are picked up without restarting the kernel.
reload(oda_integral_wrapper.wrapper)
wrap = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private', )
```
%% Cell type:code id:055d4bf6 tags:
``` python
looping = True
import time
# This looping allows us to submit all jobs at once: every pass (re)submits
# any light curve that has no result yet (wait=False), then sleeps before
# polling the backend again.
n_loops = 0
max_loops = 200
while looping:
    for source in observations:
        # Light curves are only extracted for fields whose mosaics were
        # processed successfully (the source catalogs come from the mosaics).
        if 'processed_mosaics' in source and source['processed_mosaics']:
            scw_list = source['scw_list']
            #print(len(scw_list))
            if use_isgri:
                if source.get('isgri_lc', None) is None:
                    if source.get('isgri_source_catalog', None) is None or \
                            (len(json.loads(source['isgri_source_catalog'])['cat_column_list'][1]) == 0):
                        print('ISGRI LCs for %s are not extracted because no sources are present' % source['title'])
                        source.update({'isgri_lc': 'Error'})
                    else:
                        par_dict = {
                            'instrument': 'isgri',
                            'product': 'isgri_lc',
                            'E1_keV': E1_keV,
                            'E2_keV': E2_keV,
                            'osa_version': osa_version,
                            'product_type': 'Real',
                            'src_name': source['title'],
                            'RA': source['RA'],
                            'DEC': source['Dec'],
                            'selected_catalog': source['isgri_source_catalog']
                        }
                        # BUGFIX: this used to be `if 'T1' in source.keys():
                        # pardict.update(...)` -- a misspelled dict name
                        # (NameError) guarded by a key ('T1') that is never
                        # set on `source`; use par_dict and 'tstart', the
                        # same test the JEM-X branches below use.
                        if 'tstart' in source.keys():
                            par_dict.update({
                                'T1': source['tstart'],
                                'T2': source['tstop']
                            })
                        try:
                            data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120,
                                                           save_partial_products=False,
                                                           wait=False, **par_dict)
                            source.update({'isgri_lc': data})
                        except Exception:
                            print('ISGRI lc for %s failed' % source['title'])
                            source.update({'isgri_lc': 'Error'})
            if use_jemx1:
                if source.get('jemx1_lc', None) is None:
                    #print("Catlog length ", len(json.loads(source['jemx1_source_catalog'])['cat_column_list'][1]))
                    if source.get('jemx1_source_catalog', None) is None or \
                            (len(json.loads(source['jemx1_source_catalog'])['cat_column_list'][1]) == 0):
                        print('JEMX1 LCs for %s are not extracted because no sources are present' % source['title'])
                        source.update({'jemx1_lc': 'Error'})
                    else:
                        # NOTE(review): unlike the JEM-X2 branch below, no
                        # 'time_bin' is passed here -- confirm whether that
                        # is intentional.
                        par_dict_j = {'instrument': 'jemx',
                                      'product': 'jemx_lc',
                                      'jemx_num': 1,
                                      'E1_keV': J_E1_keV,
                                      'E2_keV': J_E2_keV,
                                      'osa_version': osa_version,
                                      'product_type': 'Real',
                                      'src_name': source['title'],
                                      'RA': source['RA'],
                                      'DEC': source['Dec'],
                                      'selected_catalog': source['jemx1_source_catalog']
                                      }
                        if 'tstart' in source.keys():
                            par_dict_j.update({'T1': source['tstart'],
                                               'T2': source['tstop']})
                        try:
                            data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120, save_partial_products=False,
                                                           wait=False, **par_dict_j)
                            source.update({'jemx1_lc': data})
                        except Exception:
                            print('JEM-X1 lc for %s failed' % source['title'])
                            source.update({'jemx1_lc': 'Error'})
            if use_jemx2:
                if source.get('jemx2_lc', None) is None:
                    if source.get('jemx2_source_catalog', None) is None or \
                            (len(json.loads(source['jemx2_source_catalog'])['cat_column_list'][1]) == 0):
                        print('JEMX2 LCs for %s are not extracted because no sources are present' % source['title'])
                        source.update({'jemx2_lc': 'Error'})
                    else:
                        par_dict_j = {'instrument': 'jemx',
                                      'product': 'jemx_lc',
                                      'jemx_num': 2,
                                      'E1_keV': J_E1_keV,
                                      'E2_keV': J_E2_keV,
                                      'osa_version': osa_version,
                                      'product_type': 'Real',
                                      'src_name': source['title'],
                                      'RA': source['RA'],
                                      'DEC': source['Dec'],
                                      'time_bin': lc_time_bin,
                                      'selected_catalog': source['jemx2_source_catalog']
                                      }
                        if 'tstart' in source.keys():
                            par_dict_j.update({'T1': source['tstart'],
                                               'T2': source['tstop']})
                        try:
                            data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120,
                                                           save_partial_products=False,
                                                           wait=False, **par_dict_j)
                            source.update({'jemx2_lc': data})
                        except Exception:
                            print('JEM-X2 lc for %s failed' % source['title'])
                            source.update({'jemx2_lc': 'Error'})
        else:
            # No processed mosaic means no catalog: flag every product.
            source.update({'isgri_lc': 'Error'})
            source.update({'jemx1_lc': 'Error'})
            source.update({'jemx2_lc': 'Error'})
    #Checks if I need to ask again
    looping = False
    for ii in observations:
        if use_isgri:
            if ii.get('isgri_lc', None) is None:
                print('Need to loop again on isgri for %s ' % (ii['title']))
                looping = True
        if use_jemx1:
            if ii.get('jemx1_lc', None) is None:
                print('Need to loop again on jemx1 for %s ' % (ii['title']))
                looping = True
        if use_jemx2:
            if ii.get('jemx2_lc', None) is None:
                print('Need to loop again on jemx2 for %s ' % (ii['title']))
                looping = True
    # Wait before polling again, but never loop forever.
    if looping and n_loops < max_loops:
        time.sleep(120)
        n_loops += 1
    else:
        looping = False
```
%% Cell type:code id:783a1be1 tags:
``` python
import re
reload(oda_integral_wrapper.wrapper)
wrap = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private', )
# Write one FITS light curve per (source, instrument) and record the file
# names and time ranges on each observation entry.
for source in observations:
    # The wrapper's dedicated helper sanitises the source title.
    # (The old local `re.sub(r'[\W]+', '_', ...)` line was dead code --
    # its result was immediately overwritten -- and has been removed.)
    sanitized_source_title = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_source_title(source['title'])
    # File-name prefix: <sanitised title>_<exposure id>
    pattern = sanitized_source_title + '_' + str(source['expid'])
    if use_isgri:
        lc_isgri = source.get('isgri_lc', None)
        # A str value means the extraction failed ('Error'); skip it.
        if lc_isgri is not None and type(lc_isgri) is not str:
            isgri_sources, isgri_lcs, isgri_tstarts, isgri_tstops, isgri_exposures = \
                wrap.write_all_lc_fits_files(lc_isgri, pattern)
            source.update({'isgri_files': {'fname': isgri_lcs, 'tstart': isgri_tstarts,
                                           'tstop': isgri_tstops,
                                           'exposure': isgri_exposures},
                           'isgri_sources': isgri_sources})
    if use_jemx1:
        lc_jemx1 = source.get('jemx1_lc', None)
        #print(type(lc_jemx1))
        if lc_jemx1 is not None and type(lc_jemx1) is not str:
            jemx1_sources, jemx1_lcs, jemx1_tstarts, jemx1_tstops, jemx1_exposures = \
                wrap.write_all_lc_fits_files(lc_jemx1, pattern)
            source.update({'jemx1_files': {'fname': jemx1_lcs, 'tstart': jemx1_tstarts,
                                           'tstop': jemx1_tstops,
                                           'exposure': jemx1_exposures},
                           'jemx1_sources': jemx1_sources})
    if use_jemx2:
        lc_jemx2 = source.get('jemx2_lc', None)
        if lc_jemx2 is not None and type(lc_jemx2) is not str:
            jemx2_sources, jemx2_lcs, jemx2_tstarts, jemx2_tstops, jemx2_exposures = \
                wrap.write_all_lc_fits_files(lc_jemx2, pattern)
            source.update({'jemx2_files': {'fname': jemx2_lcs, 'tstart': jemx2_tstarts,
                                           'tstop': jemx2_tstops,
                                           'exposure': jemx2_exposures},
                           'jemx2_sources': jemx2_sources})
%% Cell type:code id:3a980fda tags:
``` python
# Upload LC to Gallery
import oda_api.plot_tools
import re
reload(oda_api.plot_tools)
reload(oda_integral_wrapper.wrapper)
wrap2=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
instruments = []
if use_isgri:
instruments.append('isgri')
if use_jemx1:
instruments.append('jemx1')
if use_jemx2:
instruments.append('jemx2')
if batch_run:
additional_information = _i2
else:
additional_information = _i1
# remove any token from the additional_information
token_pattern = r"token\s*=\s*[\'\"].*?[\'\"]"
additional_information = re.sub(token_pattern, 'token = \"<Insert yout token here>\"', additional_information, flags=re.DOTALL)
host_type_pattern = r"host_type\s*=\s*[\'\"].*?[\'\"]"
additional_information = re.sub(host_type_pattern, 'host_type = \"production\"', additional_information, flags=re.DOTALL)
notebooks_folder_pattern = r"notebooks_folder\s*=\s*[\'\"].*?[\'\"]\n"
additional_information = re.sub(notebooks_folder_pattern, '', additional_information, flags=re.DOTALL)
for obs in observations:
print(obs)
for ins in instruments:
lc = obs.get('%s_lc' % ins, None)
systematic_fraction = isgri_systematic_fraction
if 'jemx' in ins:
systematic_fraction = jemx_systematic_fraction
if lc is not None and type(lc) is not str:
sources = obs['%s_sources' % ins]
image_product = oda_api.plot_tools.OdaLightCurve(lc)
#print(lc.as_list())
systematic_fraction = isgri_systematic_fraction
xlim = [20,200]
if 'jemx' in ins:
systematic_fraction = jemx_systematic_fraction
xlim = [2,30]
E1_keV = 1
E2_keV = 100
k1 = 'E1_isgri'
k2 = 'E2_isgri'
if 'jemx' in ins:
k1 = 'E1_jemx'
k2 = 'E2_jemx'
E1_keV = obs.get(k1, E1_keV)
E2_keV = obs.get(k2, E2_keV)
print(f"E1_kev: {E1_keV}")
print(f"E2_keV: {E2_keV}")
print(f"k1: {k1}")
print(f"k2: {k2}")
for i,src in enumerate(sources):
#print(src)
if not os.path.isdir('out'):
os.mkdir('out')
img_fn = image_product.get_image_for_gallery( in_source_name=src,
systematic_fraction=systematic_fraction,
output_folder='out')
par_dict_product_id = {
'source_name': src,
# 't1': obs['tstart'],
# 't2': obs['tstop'],
# 'e1_kev' : E1_keV,
# 'e2_kev' : E2_keV,
'obsid': obs['obsid'],
'instrument' : '%s' % ins,
'product_type' : '%s_lc' % ins,
"rev_num" : rev_num
}
product_id = oda_api.api.DispatcherAPI.calculate_param_dict_id(par_dict_product_id)
lc = obs['%s_files' % ins]['fname'][i]
nrt_string = ''
if data_version.upper() == 'NRT':
nrt_string = ' (NRT)'
par_dict={
'token': token,
#if observation is incomplete (NRT) it would create new products at a later run
'T1': re.sub('\.\d{3}', '', obs['tstart']),
'T2': re.sub('\.\d{3}', '', obs['tstop']),
'e1_kev' : E1_keV,
'e2_kev' : E2_keV,
'product_title' : src + ' %s light curve' % ins + nrt_string,
'gallery_image_path' : img_fn,
'fits_file_path' : [lc],
'src_name' : src,
'instrument' : ins,
'insert_new_source' : True,
'force_insert_not_valid_new_source' : False,
'validate_source' : True,
'apply_fields_source_resolution': True,
'product_type' : '%s_lc' % ins ,
'product_id' : product_id,
#input parameters assuming they are in cell #1
'additional_information' : additional_information,
'html_image': image_product.get_html_image(src, systematic_fraction),
'produced_by' : origin_notebook
}
n_max_tries = 3
n_tries_left = n_max_tries
#print(par_dict)
while True:
try:
d = wrap2.disp.post_data_product_to_gallery(**par_dict)
source.update({
"%s_gallery_object" % ins : d.copy(),
})
if 'error_message' not in d:
source.update({
'processed_lc': True
})
break
else:
n_tries_left -= 1
if n_tries_left == 0:
break
else:
print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
except Exception as e:
print(f"Exception while posting a product on the gallery, will re-attemp to post:\n{e}")
n_tries_left -= 1
if n_tries_left == 0:
break
else:
print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
print(obs)
```
%% Cell type:code id:308d2096 tags:
``` python
# Display the dispatcher URL used for the gallery uploads (sanity check).
wrap2.disp.url
```
%% Cell type:code id:9c547dc3 tags:
``` python
# Persist the updated observations structure back to the rev yaml file.
oda_integral_wrapper.wrapper.dump_yaml(observations, yaml_file_path)
```
%% Cell type:code id:6d140f74 tags:
``` python
reload(oda_integral_wrapper.wrapper)

# Make sure the output folder exists before writing the yaml summary.
if not os.path.isdir(output_yaml_dir):
    os.mkdir(output_yaml_dir)
yaml_file_path = os.path.join(output_yaml_dir, 'rev_%s.yaml' % rev_num)

# Strip transient entries and record the energy bands used for this run,
# then dump and re-read the file to verify it round-trips.
energy_bands = {'E1_isgri': E1_keV,
                'E2_isgri': E2_keV,
                'E1_jemx': J_E1_keV,
                'E2_jemx': J_E2_keV}
observations = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_and_update_observations(
    observations, dictionary_to_update=energy_bands)
oda_integral_wrapper.wrapper.dump_yaml(observations, yaml_file_path)
with open(yaml_file_path) as file:
    observations = yaml.load(file, Loader=yaml.FullLoader)
print(observations)
```
%% Cell type:code id:ab8fde01 tags:
``` python
# Convert the revolution number to its UTC start/end times, dropping the
# fractional seconds (".mmm") from the ISO strings.
time_dict = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('REVNUM', rev_num, 'ANY')
utc_start_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[1],
                                                                         'UTC')
# BUGFIX(lint): raw strings -- '\.' and '\d' are invalid escape sequences
# in plain literals (SyntaxWarning on recent Python); the pattern itself
# is unchanged.
utc_start_rev = re.sub(r'\.\d{3}', '', utc_start_rev)
utc_end_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[2],
                                                                       'UTC')
utc_end_rev = re.sub(r'\.\d{3}', '', utc_end_rev)
```
%% Cell type:code id:efca9980 tags:
``` python
reload(oda_integral_wrapper.wrapper)

# Update (or create) the gallery observation entry for this revolution,
# attaching the yaml summary and the full list of obsids.
obsids = [entry['obsid'] for entry in observations]
gallery_params = dict(
    observation_title="rev. %s" % rev_num,
    T1=utc_start_rev,
    T2=utc_end_rev,
    yaml_file_path=yaml_file_path,
    obsid=obsids,
    token=token,
    create_new=True,
)
wrap2 = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
d = wrap2.disp.update_observation_with_title(**gallery_params)
```
%% Cell type:code id:5e8d334b tags:output
``` python
# Display the final observations structure (cell tagged as output).
observations
```
......
%% Cell type:code id:6778cc43 tags:parameters
``` python
import os,sys
# Papermill "parameters" cell for the mosaics notebook.
rev_num = 0
#If <=0 it will take the current revolution (zero) or previous ones, select NRT as data_version
E1_keV = "28.0"  # ISGRI energy band, keV
E2_keV = "40.0"
J_E1_keV = "3.0"  # JEM-X energy band, keV
J_E2_keV = "20.0"
osa_version = 'OSA11.2'
detection_threshold = 7  # sigma threshold used for source detection
host_type = 'production'  # which dispatcher deployment to talk to
to_exclude_from_title = '' # 'Cen A'
#if this string is in the title of the observation, this is excluded from the analysis. Leave '' to not use it.
use_isgri = True
use_jemx1 = True
use_jemx2 = True
include_new_sources = False  # keep NEW (unidentified) detections in the catalogs
data_version = 'CONS' #It can be CONS or NRT
output_yaml_dir = 'rev' # A folder to store the yaml file
batch_run = False  # True when executed via papermill
token = ""  # ODA token; auto-discovered below when left empty
notebooks_folder = os.getcwd()
```
%% Cell type:code id:a19a679d tags:
``` python
import astropy.io.fits as fits
import numpy as np
from astroquery.simbad import Simbad
from astropy import units as u
from astropy.coordinates import SkyCoord
import copy
import re
import os
import pandas as pd
import os,sys
from importlib import reload
import json
```
%% Cell type:code id:9f927d54 tags:
``` python
## It does not work from papermill
# import ipynbname
# nb_fname = ipynbname.name()
nb_fname = "Generic Revolution Mosaics.ipynb"
# Derive the "produced_by" link from the local git checkout; fall back to
# the canonical gallery-notebooks URL when that is not possible.
try:
    # Import inside the try so a missing gitpython package also triggers
    # the fallback instead of crashing the notebook.
    from git import Repo
    repo = Repo(notebooks_folder)
    repo_name = repo.remotes.origin.url.split('.git')[0]
    origin_notebook = repo_name.replace(':', '/').replace('git@', 'https://') + \
        '/-/blob/master/' + nb_fname
except Exception:
    # BUGFIX: "+ nb_fname" used to sit on its own line as a separate (no-op)
    # expression statement, so the notebook name was never appended to the
    # fallback URL.
    origin_notebook = ('https://gitlab.astro.unige.ch/oda/product-gallery/gallery-notebooks/-/blob/master/'
                       + nb_fname)
#origin_notebook = "<a href=\"%s\" target=\"blank\">%s</a>" %(origin_notebook, nb_fname.replace('%20', ' '))
print(origin_notebook)
```
%% Cell type:code id:ebbea13d tags:
``` python
import logging

# Verbose (INFO) logging; a module-level `logger` handle is kept for the
# warning messages emitted by later cells.
#logging.getLogger().setLevel(logging.WARNING)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.getLogger('').addHandler(logging.StreamHandler())
```
%% Cell type:code id:4ebb2ba5 tags:
``` python
import oda_integral_wrapper.wrapper
import oda_integral_wrapper.itime as itime
import oda_integral_wrapper.planning as planning
import oda_api.plot_tools
```
%% Cell type:code id:d6382edb tags:
``` python
import oda_api.token
from os import environ, getcwd, path

# Fall back to token auto-discovery when none was provided as a parameter;
# decoding prints the token's claims (expiry, roles, ...).
if token == '':
    token = oda_api.token.discover_token()
oda_api.token.decode_oda_token(token)
```
%% Cell type:code id:b1f533e8 tags:
``` python
# Wrapper used for the time conversions and job submissions below.
wrap = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
```
%% Cell type:code id:b5804721 tags:
``` python
reload(itime)
# A non-positive rev_num is interpreted as an offset from the current
# revolution; in that case NRT data are forced, JEM-X2 is disabled, and
# new (unidentified) sources are kept.
if rev_num > 0:
    rev_num = '%04d' % int(rev_num)
else:
    current_time = itime.now()
    rev_num = '%04d' % (int(current_time.REVNUM) + rev_num)
    data_version = 'NRT'
    use_jemx2 = False
    include_new_sources = True
    print("We force NRT data, no use of JEM-X2, and show new sources.")
print("Revolution ", rev_num)
```
%% Cell type:code id:c27b4548 tags:
``` python
reload(planning)
# Consolidated data use the historic (as-flown) pointing definition; NRT
# runs are based on the predicted timeline instead.
if data_version == 'CONS':
    pointings = planning.get_pointings(rev_num, 'historic')
else:
    pointings = planning.get_pointings(
        rev_num, 'predicted', max_version=15)
```
%% Cell type:code id:515581dc tags:
``` python
# Display the pointing table for inspection.
pointings
```
%% Cell type:code id:f2ff018e tags:
``` python
reload(itime)
# Current INTEGRAL time; .UTC is displayed as a quick sanity check.
now = itime.now()
now.UTC
```
%% Cell type:code id:ce1dc9ea tags:
``` python
# Display the current science-window id ('000000000000' when unavailable).
now.SCWID
```
%% Cell type:code id:3fb94f82 tags:
``` python
# Keep only real pointings (POINTING_TYPE == 0) longer than 500 s; when a
# current science-window id is available, additionally drop pointings that
# have not started yet.
base_mask = (pointings['DURATION'] > 500.) & (pointings['POINTING_TYPE'] == 0)
if now.SCWID == '000000000000':
    mask = base_mask
else:
    mask = base_mask & (pointings['POINTING_ID'] < now.SCWID[0:8])
```
%% Cell type:code id:bcd68827 tags:
``` python
# Unique, non-empty exposure identifiers covered by the mask.
expids = {entry for entry in pointings['EXPID'][mask] if entry != ''}
expids
```
%% Cell type:code id:94fc7d21 tags:
``` python
# CONS science windows carry version suffix '.001', NRT ones '.000'.
if data_version.upper() == 'NRT':
    data_version_string = '.000'
else:
    data_version_string = '.001'
scw_list = [pid + '0010' + data_version_string
            for pid in pointings['POINTING_ID'][mask]]
```
%% Cell type:code id:9aa22073 tags:
``` python
reload(planning)
# Fetch the Pointing/Observation Definition (POD) file for this revolution.
pod = planning.get_pod(rev_num)
```
%% Cell type:code id:c0573838 tags:
``` python
print(pod)
# Read the POD FITS file; extension 1 holds the exposure records.
ff=fits.open(pod)
pod_data = ff[1].data
ff.close()
```
%% Cell type:code id:d11d3f75 tags:
``` python
# Display the POD records for inspection.
pod_data
```
%% Cell type:code id:453c90c8 tags:
``` python
reload(planning)
# Fetch the current Proposal/Approved Data (PAD) table for this revolution.
pad = planning.get_current_pad(rev_num)
```
%% Cell type:code id:b68a1701 tags:
``` python
observations = []
# Strips anything that is not a digit or a dot (used to sanitise RA/Dec).
non_decimal = re.compile(r'[^\d.]+')
# Build one observation record per exposure id, collecting its science
# windows, time range, and the matching POD/PAD metadata.
for expid in expids:
    scw_list=sorted([s+'0010'+data_version_string for i, s in enumerate(pointings['POINTING_ID'][mask]) \
        if pointings['EXPID'][mask][i] == expid])
    t1 = sorted([s for i, s in enumerate(pointings['TIME'][mask]) \
        if pointings['EXPID'][mask][i] == expid])
    t2 = sorted([s[0]+s[1]/86400. for i, s in enumerate(zip(pointings['TIME'][mask], pointings['DURATION'][mask])) \
        if pointings['EXPID'][mask][i] == expid])
    # Find the POD record for this exposure.
    # NOTE(review): if no row matches, pod_record keeps its value from the
    # previous iteration (or is undefined on the very first one) -- confirm
    # that every expid always has a POD entry.
    for row in pod_data:
        if row['EXP_ID'] == expid:
            pod_record = row
            break
    if 'dummy' in pod_record['SRC_NAME'].lower():
        print('Does not process Dummy Pointings')
        continue
    # Default PAD record, used when no matching proposal entry is found.
    pad_record=[pod_record['OBS_ID'][0:7], pod_record['SRC_NAME'].encode(), 1, b'1', b'unknown']
    for pp in pad:
        #print(pp[0].decode(), pod_record['OBS_ID'][0:7])
        if pp[0].decode() == pod_record['OBS_ID'][0:7]:
            pad_record = pp
            break
    # NOTE(review): `pp` is whatever the loop above last bound (the matched
    # record when `break` fired, otherwise the final table row, undefined
    # for an empty pad) -- presumably this was meant to inspect the matched
    # pad_record; confirm.
    if 'russia' in pp[6].decode().lower() and data_version.lower() == 'nrt':
        print('Data are from Russian Federation reserved time')
        continue
    if to_exclude_from_title != '':
        if to_exclude_from_title.lower() in pod_record['SRC_NAME'].lower():
            print('Not running "' + pod_record['SRC_NAME'] +
                '" for user decision, as %s is in the title' % to_exclude_from_title)
            continue
    #I use only available science windows !
    # Keep only science windows that ended more than one hour ago (IJD days).
    new_scw_list = []
    new_t1 = []
    new_t2 = []
    for ss, tmp1, tmp2 in zip(scw_list,t1,t2):
        if tmp2 < now.IJD - 1./24.:
            new_scw_list.append(ss)
            new_t1.append(tmp1)
            new_t2.append(tmp2)
    if len(new_scw_list) > 0:
        observation = {
            't1' : new_t1,
            't2' : new_t2,
            'expid' : expid,
            'scw_list': new_scw_list,
            'obsid' : pod_record['OBS_ID'],
            'title' : pod_record['SRC_NAME'],
            'extended_title' : pad_record[1].decode().strip(),
            'pi' : re.sub(' +', ' ', pad_record[4].decode().strip()),
            'RA': non_decimal.sub('', str(pod_record['RA_OBJ'])),
            'Dec': non_decimal.sub('', str(pod_record['DEC_OBJ']))
        }
        observations.append(observation)
```
%% Cell type:code id:60e96edf tags:
``` python
# Sorted list of the distinct observation ids.
obsids = sorted({entry['obsid'] for entry in observations})
```
%% Cell type:code id:84af83a2 tags:
``` python
# Merging different EXPIDs: observations sharing the same obsid are merged
# into the first occurrence; `merged_obs` records the indices of entries
# that were absorbed (removed in the next cell).
merged_obs=[]
for i,o1 in enumerate(observations):
    # Skip entries already merged into an earlier one; the last entry can
    # never be a merge base.
    if i in merged_obs or i >= len(observations)-1:
        continue
    for j in range(i+1, len(observations)):
        o2 = observations[j]
        if o2['obsid'] == o1['obsid']:
            merged_obs.append(j)
            print('merging')
            # Concatenate and re-sort science windows and time ranges.
            o1['scw_list']+=o2['scw_list']
            o1['scw_list']=sorted(o1['scw_list'])
            o1['t1']+=o2['t1']
            o1['t1']=sorted(o1['t1'])
            o1['t2']+=o2['t2']
            o1['t2']=sorted(o1['t2'])
    print(o1['scw_list'])
```
%% Cell type:code id:e96d67bc tags:
``` python
# Remove the observations that were merged into an earlier entry.
# merged_obs may be unsorted; indices must be processed in ascending order,
# each shifted by the number of items already removed.
for n_removed, idx in enumerate(sorted(merged_obs)):
    observations.pop(idx - n_removed)
```
%% Cell type:code id:837a56b7 tags:
``` python
# Display the merged observations for inspection.
observations
```
%% Cell type:code id:8e999c3b tags:
``` python
# Wrapper with full (private) data rights, used for the job submissions below.
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
                                                  host_type=host_type )
```
%% Cell type:code id:1c12b52a tags:
``` python
# Derive each observation's UTC start/stop from its first and last pointing
# times (given in IJD).
for obs in observations:
    obs['tstart'] = wrap.converttime('IJD', obs['t1'][0], 'UTC')
    obs['tstop'] = wrap.converttime('IJD', obs['t2'][-1], 'UTC')
```
%% Cell type:code id:7ad054c2 tags:
``` python
from astropy.io import fits
# Load the ISDC general reference catalogue (v43), used below to
# cross-match sources detected in the mosaics.
ff=fits.open('https://www.isdc.unige.ch/integral/catalog/43/gnrl_refr_cat_0043.fits.gz')
isdc_sources = ff[1].data
ff.close()
```
%% Cell type:code id:14da09ad tags:
``` python
looping = True
import time
# This looping allows us to submit all mosaic jobs at once: every pass
# (re)submits any mosaic with no result yet (wait=False), then sleeps
# before polling the backend again.
n_loops = 0
max_loops = 200
while looping:
    for source in observations:
        scw_list = source['scw_list']
        #print(len(scw_list))
        if use_isgri:
            if source.get('isgri_raw_mosaic', None) is None:
                par_dict = {
                    'instrument': 'isgri',
                    'product': 'isgri_image',
                    'E1_keV': E1_keV,
                    'E2_keV': E2_keV,
                    'osa_version': osa_version,
                    'detection_threshold': detection_threshold,
                    'product_type': 'Real',
                    'src_name': source['title'],
                    'RA': source['RA'],
                    'DEC': source['Dec']
                }
                # BUGFIX: this used to be `if 'T1' in source.keys():
                # pardict.update(...)` -- a misspelled dict name (NameError)
                # guarded by a key ('T1') that is never set on `source`;
                # use par_dict and 'tstart' like the JEM-X branches.
                if 'tstart' in source.keys():
                    par_dict.update({
                        'T1': source['tstart'],
                        'T2': source['tstop']
                    })
                try:
                    data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120, save_partial_products=False,
                                                   wait=False, **par_dict)
                    source.update({'isgri_raw_mosaic': data})
                except Exception:
                    print('ISGRI mosaic for %s failed' % source['title'])
                    source.update({'isgri_raw_mosaic': 'Error'})
        if use_jemx1:
            if source.get('jemx1_raw_mosaic', None) is None:
                par_dict_j = {'instrument': 'jemx',
                              'product': 'jemx_image',
                              'jemx_num': 1,
                              'E1_keV': J_E1_keV,
                              'E2_keV': J_E2_keV,
                              'osa_version': osa_version,
                              'detection_threshold': detection_threshold,
                              'product_type': 'Real',
                              'src_name': source['title'],
                              'RA': source['RA'],
                              'DEC': source['Dec']
                              }
                if 'tstart' in source.keys():
                    par_dict_j.update({'T1': source['tstart'],
                                       'T2': source['tstop']})
                try:
                    data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120, save_partial_products=False,
                                                   wait=False, **par_dict_j)
                    source.update({'jemx1_raw_mosaic': data})
                except Exception:
                    print('JEM-X1 mosaic for %s failed' % source['title'])
                    source.update({'jemx1_raw_mosaic': 'Error'})
        if use_jemx2:
            if source.get('jemx2_raw_mosaic', None) is None:
                par_dict_j = {'instrument': 'jemx',
                              'product': 'jemx_image',
                              'jemx_num': 2,
                              'E1_keV': J_E1_keV,
                              'E2_keV': J_E2_keV,
                              'osa_version': osa_version,
                              'detection_threshold': detection_threshold,
                              'product_type': 'Real',
                              'src_name': source['title'],
                              'RA': source['RA'],
                              'DEC': source['Dec']
                              }
                if 'tstart' in source.keys():
                    par_dict_j.update({'T1': source['tstart'],
                                       'T2': source['tstop']})
                try:
                    data = wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120,
                                                   save_partial_products=False,
                                                   wait=False, **par_dict_j)
                    source.update({'jemx2_raw_mosaic': data})
                except Exception:
                    print('JEM-X2 mosaic for %s failed' % source['title'])
                    source.update({'jemx2_raw_mosaic': 'Error'})
    #Checks if I need to ask again
    looping = False
    for ii in observations:
        if use_isgri:
            if ii.get('isgri_raw_mosaic', None) is None:
                print('Need to loop again on isgri for %s ' % (ii['title']))
                looping = True
        if use_jemx1:
            if ii.get('jemx1_raw_mosaic', None) is None:
                print('Need to loop again on jemx1 for %s ' % (ii['title']))
                looping = True
        if use_jemx2:
            if ii.get('jemx2_raw_mosaic', None) is None:
                print('Need to loop again on jemx2 for %s ' % (ii['title']))
                looping = True
    # Wait before polling again, but cap the number of passes so the
    # notebook cannot loop forever (consistent with the LC notebook).
    if looping and n_loops < max_loops:
        time.sleep(120)
        n_loops += 1
    else:
        looping = False
```
%% Cell type:code id:0ba6a752 tags:
``` python
# Reload oda_api (bound via the earlier `import oda_api.plot_tools`) so
# DispatcherAPI picks up any local changes.
reload(oda_api)
# Parameter dictionaries from which deterministic gallery product ids are
# derived: identical parameters always map to the same product page.
# NOTE(review): key names/order differ per instrument on purpose -- do not
# restructure, as the hash presumably depends on the exact contents.
isgri_params_dic_product_id = {
    'E1_keV': E1_keV,
    'E2_keV': E2_keV,
    'osa_version': osa_version,
    'detection_threshold': detection_threshold,
    'instrument': 'isgri',
    'product': 'isgri_image'
}
jemx1_params_dic_product_id = {
    'J_E1_keV': J_E1_keV,
    'J_E2_keV': J_E2_keV,
    'osa_version': osa_version,
    'detection_threshold': detection_threshold,
    'instrument' : 'jemx',
    'product' : 'jemx_image',
    'jemx_num' : 1
}
jemx2_params_dic_product_id = {
    'J_E1_keV': J_E1_keV,
    'J_E2_keV': J_E2_keV,
    'osa_version': osa_version,
    'detection_threshold': detection_threshold,
    'instrument' : 'jemx',
    'product' : 'jemx_image',
    'jemx_num' : 2
}
# Compute and print the product id for every enabled instrument.
if use_isgri:
    isgri_request_product_id = oda_api.api.DispatcherAPI.calculate_param_dict_id(isgri_params_dic_product_id)
    print(isgri_request_product_id)
if use_jemx1:
    jemx1_request_product_id = oda_api.api.DispatcherAPI.calculate_param_dict_id(jemx1_params_dic_product_id)
    print(jemx1_request_product_id)
if use_jemx2:
    jemx2_request_product_id = oda_api.api.DispatcherAPI.calculate_param_dict_id(jemx2_params_dic_product_id)
    print(jemx2_request_product_id)
```
%% Cell type:markdown id:25bc5050 tags:
## Image cleaning
%% Cell type:code id:60ce5f7a tags:
``` python
reload(oda_api.plot_tools)
reload(oda_integral_wrapper.wrapper)
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
host_type=host_type )
import mosaic.treat
reload(mosaic.treat)
import subprocess
import re
# import pickle
from glob import glob
instruments = []
if use_isgri:
instruments.append('isgri')
if use_jemx1:
instruments.append('jemx1')
if use_jemx2:
instruments.append('jemx2')
reg_ex_source_title = r'[\W]+'
for instr in instruments:
for source in observations:
# if 'Gal' not in source['title'] or 'isgr' not in instr:
# continue
data=source['%s_raw_mosaic' % instr]
if type(data) == str or data is None:
continue
sanitized_source_title = re.sub(reg_ex_source_title, '_', source['title'])
sanitized_source_title = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_source_title(source['title'])
outfile_name = sanitized_source_title + '_' + str(source['expid']) + '_%s_mosaic.fits' % instr
if hasattr(data,'mosaic_image_0_mosaic' ):
data.mosaic_image_0_mosaic.write_fits_file(outfile_name, overwrite=True)
else:
print('Mosaic "%s" for %s is empty' % (source['title'], instr))
continue
print(f"Querying the object {source['title']} from Simbad")
try:
simbad = Simbad.query_object(source['title'])
if simbad is None:
object_of_interest = None
else:
coord = SkyCoord(simbad['RA'], simbad['DEC'], unit=[u.hour, u.deg])
object_of_interest = [(source['title'],coord)]
except:
object_of_interest = None
print(f"Error when querying the object {source['title']} from Simbad")
data2 = copy.copy(data)
api_cat_str = wrap.extract_catalog_string_from_image(data2, det_sigma=detection_threshold,
objects_of_interest=object_of_interest,
update_catalog=True,
include_new_sources=include_new_sources,
new_source_suffix='_'+source['obsid'])
#Compute fluxes on the original image
try:
fluxed_catalog = wrap.compute_fluxes(data2, detection_threshold)
except:
logger.warning("We could not compute the fluxes of source, returning None!!!")
fluxed_catalog = None
#print(get_html(fluxed_catalog))
source_name = source['title']
if api_cat_str is not None:
#api_cat = json.loads(api_cat_str)
api_cat_fname = 'api_cat_str_%s_%s.txt'%(sanitized_source_title, instr)
source.update({'api_cat_fname' : api_cat_fname})
with open(api_cat_fname,'w') as f:
f.write(api_cat_str)
sources=wrap.extract_catalog_table_from_image(data2, objects_of_interest=object_of_interest)
# change with outfile_path ?
oia = mosaic.treat.OSAMosaicImageAnalysis(outfile_name,
outfile_name.replace('_mosaic.fits','_mosaic_clean_'),
source_analysis = True,
exposure_fraction_cut = 100)
oia.reference_catalog = isdc_sources
try:
oia.main()
except:
print("WARNING\nCould not process " + outfile_name + '\n SKIPPING')
continue
csv_outfile_name = outfile_name.replace('_mosaic.fits','_mosaic_clean_source_results.csv')
if os.path.isfile(csv_outfile_name):
sources_sextractor = pd.read_csv(csv_outfile_name)
else:
sources_sextractor = []
if instr == 'isgri':
sextractor_fname = outfile_name.replace('_mosaic.fits',
'_mosaic_clean_significance%.0f_%.0f.fits') % (float(E1_keV),
float(E2_keV))
else:
sextractor_fname = glob(outfile_name.replace('_mosaic.fits',
'_mosaic_clean_significance%s*_*.fits' % J_E1_keV[0]))[0]
print("Using This file name " + sextractor_fname)
f_image_sexttractor = fits.open(sextractor_fname)
# We get the actual start and stop times
image_sextractor = f_image_sexttractor[0].data
f_image_sexttractor.close()
data2.mosaic_image_0_mosaic.data_unit[4].data = image_sextractor
if 'DATE-OBS' in data2.mosaic_image_0_mosaic.data_unit[1].header:
source['tstart'] = data2.mosaic_image_0_mosaic.data_unit[1].header['DATE-OBS']
else:
t_start_ijd = data2.mosaic_image_0_mosaic.data_unit[1].data['TSTART'][0]
source['tstart'] = wrap.converttime('IJD', t_start_ijd, 'UTC')
if 'DATE-END' in data2.mosaic_image_0_mosaic.data_unit[1].header:
source['tstop'] = data2.mosaic_image_0_mosaic.data_unit[1].header['DATE-END']
else:
t_stop_ijd = data2.mosaic_image_0_mosaic.data_unit[1].data['TSTOP'][0]
if t_stop_ijd == t_start_ijd:
#print(np.nanmax(data2.mosaic_image_0_mosaic.data_unit[5].data)/86400.)
t_stop_ijd += np.nanmax(data2.mosaic_image_0_mosaic.data_unit[5].data)/86400.
print('Update tstop')
source['tstop'] = wrap.converttime('IJD', t_stop_ijd, 'UTC')
sources2 = copy.copy(sources)
mask = np.zeros(len(sources2),dtype=bool)
for i, ss in enumerate(sources2):
#print(i,ss)
if include_new_sources:
if "NEW" in ss['src_names']:
mask[i] = True
continue
if len(sources_sextractor)>0:
for ss2 in sources_sextractor['name']:
if ss2 == ss['src_names']:
#found = True
mask[i] = True
#print (ss['src_names'])
break
else:
#patch if sextractor di not save sources
mask[i] = True
# im = cdci_data_analysis.analysis.plot_tools.Image(data2.mosaic_image_0_mosaic.data_unit[4].data,
# data2.mosaic_image_0_mosaic.data_unit[4].header)
# html_dict = im.get_html_draw(w=600,h=600)
# with open('test_%s.html' % sextractor_fname.replace('.fits', ''), 'w') as ff:
# ff.write('''<script src="https://cdn.bokeh.org/bokeh/release/bokeh-2.4.2.min.js"></script>
# <script src="https://cdn.bokeh.org/bokeh/release/bokeh-widgets-2.4.2.min.js"></script>
# ''')
# ff.write(html_dict['div'])
# ff.write(html_dict['script'])
sources2 = sources2[mask]
region_file_name = sextractor_fname.replace('.fits', '.reg')
wrap.write_ds9_region_file(sources2, region_file_name)
subprocess.check_call(['gzip', '-f', os.getcwd()+'/'+sextractor_fname])
subprocess.check_call(['gzip', '-f', os.getcwd()+'/'+outfile_name])
image_product = oda_api.plot_tools.OdaImage(data2)
if not os.path.isdir('out'):
os.mkdir('out')
img_fn = image_product.get_image_for_gallery(sources=sources2, output_folder='out')
# generate one fits files
image_product.write_fits(file_prefix = sanitized_source_title)
#We want a unique file name for each revision
myhash = img_fn.split('.')[1]
#Dumps parameters into pickle
# with open(my_hash+'.pickle', 'wb') as f:
# pickle.dump(_i1, f, pickle.HIGHEST_PROTOCOL)
subprocess.check_call(['cp', '-f', outfile_name + '.gz', myhash+'_'+ outfile_name+'.gz' ])
subprocess.check_call(['cp', '-f', sextractor_fname + '.gz', myhash+'_'+ sextractor_fname+'.gz' ])
subprocess.check_call(['cp', '-f', region_file_name, myhash + '_' + region_file_name ])
#print(wrap2.disp.url)
source.update({
'%s_region_file_name' % instr : region_file_name,
'%s_sextractor_fname' % instr : sextractor_fname,
'%s_myhash' % instr : myhash,
'%s_img_fn' % instr : img_fn,
'%s_source_catalog' % instr : api_cat_str,
'%s_raw_sources' % instr : sources,
'%s_mosaic' % instr : data2,
'%s_sources' % instr : sources2,
"%s_fluxed_catalog" % instr : fluxed_catalog,
"%s_outfile_name" % instr :outfile_name
}
)
```
%% Cell type:markdown id:ef72ad6c tags:
## Upload to the Gallery
%% Cell type:code id:81e79713 tags:
``` python
# Upload the per-field mosaic images produced earlier in this notebook to the
# MMODA product gallery, one post per (instrument, observation) pair.
reload(oda_api.plot_tools)
reload(oda_integral_wrapper.wrapper)
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
                                   host_type=host_type )
import mosaic.treat
reload(mosaic.treat)
import subprocess
import re
# import pickle
from glob import glob
# Build the list of instruments that were actually processed in this run.
instruments = []
if use_isgri:
    instruments.append('isgri')
if use_jemx1:
    instruments.append('jemx1')
if use_jemx2:
    instruments.append('jemx2')
# _i1/_i2 are IPython's recorded cell sources; the parameters cell is #2 in
# batch runs (papermill injects a cell) and #1 otherwise. Its text is published
# on the gallery as "additional information".
if batch_run:
    additional_information = _i2
else:
    additional_information = _i1
# remove any token from the additional_information
token_pattern = r"token\s*=\s*[\'\"].*?[\'\"]"
additional_information = re.sub(token_pattern, 'token = \"<Insert yout token here>\"', additional_information, flags=re.DOTALL)
# Also scrub host-specific settings (host_type, local notebook folder) so the
# published snippet is directly reusable on production.
host_type_pattern = r"host_type\s*=\s*[\'\"].*?[\'\"]"
additional_information = re.sub(host_type_pattern, 'host_type = \"production\"', additional_information, flags=re.DOTALL)
notebooks_folder_pattern = r"notebooks_folder\s*=\s*[\'\"].*?[\'\"]\n"
additional_information = re.sub(notebooks_folder_pattern, '', additional_information, flags=re.DOTALL)
print(additional_information)
for instr in instruments:
    for source in observations:
        # if 'Gal' not in source['title'] or 'isgr' not in instr:
        #     continue
        # Skip fields for which the imaging step did not produce a gallery image.
        if not ( ('%s_img_fn' % instr) in source):
            logger.warning('%s_img_fn is not present, skipping' % instr)
            continue
        # Unpack everything the imaging step stored on this observation entry.
        img_fn = source['%s_img_fn' % instr]
        api_cat_str = source['%s_source_catalog' % instr]
        sources = source['%s_raw_sources' % instr]
        data2 = source['%s_mosaic' % instr]
        sources2 = source['%s_sources' % instr]
        fluxed_catalog = source["%s_fluxed_catalog" % instr]
        myhash = source['%s_myhash' % instr]
        sextractor_fname = source['%s_sextractor_fname' % instr]
        region_file_name = source['%s_region_file_name' % instr]
        outfile_name = source['%s_outfile_name' % instr]
        # Names of detected sources to associate with the gallery product.
        if fluxed_catalog is not None:
            source_list = list(fluxed_catalog['src_names'])
        else:
            source_list = [] #[source['title']]
        wrap2=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
        #I build a unique product id
        # The product id is a hash of the request parameters plus rev_num/obsid,
        # so re-posting the same product updates it instead of duplicating it.
        # NOTE(review): isgri_params_dic_product_id / jemx*_params_dic_product_id
        # are defined in an earlier cell outside this view.
        if instr == 'isgri':
            isgri_params_dic_product_id.update({'rev_num': str(rev_num),
                                 'obsid': source['obsid']})
            source['%s_request_product_id' % instr] = \
                oda_api.api.DispatcherAPI.calculate_param_dict_id(isgri_params_dic_product_id)
        elif instr == 'jemx1':
            jemx1_params_dic_product_id.update({'rev_num': str(rev_num),
                                 'obsid': source['obsid']})
            source['jemx1_request_product_id'] = \
                oda_api.api.DispatcherAPI.calculate_param_dict_id(jemx1_params_dic_product_id)
        elif instr == 'jemx2':
            jemx2_params_dic_product_id.update({'rev_num': str(rev_num),
                                 'obsid': source['obsid']})
            source['jemx2_request_product_id'] = \
                oda_api.api.DispatcherAPI.calculate_param_dict_id(jemx2_params_dic_product_id)
        # Energy band to advertise: ISGRI values by default, JEM-X values otherwise.
        e1 = E1_keV
        e2 = E2_keV
        if 'jemx' in instr:
            e1 = J_E1_keV
            e2 = J_E2_keV
        nrt_string = ''
        if data_version.upper() == 'NRT':
            nrt_string = ' (NRT)'
        par_dict_gallery = {
            'token': token,
            'RA' : source['RA'],
            'DEC' : source['Dec'],
            'e1_kev' : e1,
            'e2_kev' : e2,
            'product_title' : source['title'] + " Rev. " + str(rev_num) + nrt_string,
            'gallery_image_path' : img_fn,
            'fits_file_path' : [myhash + '_' + sextractor_fname + '.gz', myhash + '_' + outfile_name + '.gz',
                                myhash + '_' + region_file_name],
            'src_name' : source_list,
            'instrument' : instr,
            'insert_new_source' : True,
            'force_insert_not_valid_new_source' : False,
            'validate_source' : True,
            'apply_fields_source_resolution': True,
            'product_type' : '%s_image' % instr,
            'product_id' : source['%s_request_product_id' % instr],
            'detected_sources' : wrap2.get_html_from_fluxes(fluxed_catalog,
                                                            output_file=outfile_name.replace('_mosaic.fits','_table.html')),
            #input parameters assuming they are in cell #1
            'additional_information' : additional_information,
            'html_image': oda_api.plot_tools.OdaImage.get_js9_html(myhash + '_' + sextractor_fname + '.gz',
                                                                   region_file = myhash + '_' + region_file_name,
                                                                   js9_id='myJS9',
                                                                   base_url='/mmoda/gallery/sites/default/files'),
            'produced_by' : origin_notebook,
            'obsid' : source['obsid']
        }
        # Strip fractional seconds from the time stamps before posting.
        if 'tstart' in source:
            par_dict_gallery.update({'T1' : re.sub('\.\d{3}', '', source['tstart']),
                                     'T2': re.sub('\.\d{3}', '', source['tstop'])})
        # Retry the gallery post up to n_max_tries times on error responses
        # or exceptions (e.g. transient network failures).
        n_max_tries = 3
        n_tries_left = n_max_tries
        #print(par_dict_gallery)
        while True:
            try:
                d = wrap2.disp.post_data_product_to_gallery(**par_dict_gallery)
                source.update({
                    "%s_gallery_object" % instr : d.copy(),
                })
                if 'error_message' not in d:
                    source.update({
                        'processed_mosaics': True
                    })
                    break
                else:
                    n_tries_left -= 1
                    if n_tries_left == 0:
                        break
                    else:
                        print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
            except Exception as e:
                print(f"Exception while posting a product on the gallery, will re-attemp to post:\n{e}")
                n_tries_left -= 1
                if n_tries_left == 0:
                    break
                else:
                    print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
```
%% Cell type:code id:6abdb110 tags:
``` python
# Re-instantiate the wrapper after reloading its module so that any code
# changes picked up by reload() are used by the following cells.
reload(oda_integral_wrapper.wrapper)
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
                                                  host_type=host_type)
```
%% Cell type:code id:50cb6c25 tags:
``` python
reload(oda_integral_wrapper)
# Persist the cleaned observation list, enriched with the energy ranges used,
# to <output_yaml_dir>/rev_<NNNN>.yaml for downstream notebooks.
if not os.path.isdir(output_yaml_dir):
    os.mkdir(output_yaml_dir)
yaml_file_path = os.path.join(output_yaml_dir, 'rev_%s.yaml' % rev_num)
energy_ranges = {'E1_isgri': E1_keV,
                 'E2_isgri': E2_keV,
                 'E1_jemx': J_E1_keV,
                 'E2_jemx': J_E2_keV}
copied_observations = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_and_update_observations(
    observations,
    dictionary_to_update=energy_ranges)
observations = copied_observations
oda_integral_wrapper.wrapper.dump_yaml(copied_observations, yaml_file_path)
```
%% Cell type:code id:81f08bbb tags:
``` python
import yaml
# Sanity check: re-read the freshly written YAML to make sure it round-trips.
with open(yaml_file_path) as fh:
    # FullLoader converts YAML scalars back into native Python types.
    test = yaml.load(fh, Loader=yaml.FullLoader)
print(test)
```
%% Cell type:code id:aa7e405b tags:
``` python
# Resolve the revolution's UTC start/stop from its number via the INTEGRAL
# time-conversion service; time_dict['IJD'] is a whitespace-separated string
# whose fields 1 and 2 hold the start and stop times in IJD.
time_dict = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('REVNUM', rev_num, 'ANY')
utc_start_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[1],
                                                                         'UTC')
# NOTE(fix): raw strings for the regex — '\.' and '\d' in a plain literal are
# invalid escape sequences (SyntaxWarning on recent Pythons). The sub strips
# fractional seconds from the UTC stamp.
utc_start_rev = re.sub(r'\.\d{3}', '', utc_start_rev)
utc_end_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[2],
                                                                       'UTC')
utc_end_rev = re.sub(r'\.\d{3}', '', utc_end_rev)
```
%% Cell type:code id:eb4ccf20 tags:
``` python
reload(oda_integral_wrapper.wrapper)
# Register (or refresh) the gallery observation record for this revolution,
# attaching the YAML summary and the complete list of obsids.
par_dict_gallery = dict(
    observation_title="rev. %s" % rev_num,
    T1=utc_start_rev,
    T2=utc_end_rev,
    yaml_file_path=yaml_file_path,
    obsid=[oo['obsid'] for oo in observations],
    token=token,
    create_new=True,
)
print(par_dict_gallery)
wrap2 = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
d = wrap2.disp.update_observation_with_title(**par_dict_gallery)
```
%% Cell type:code id:f400a31e tags:
``` python
# Quick inspection of the gallery response: list the returned fields, then show
# the canonical URL of the updated observation (the cell's display value).
print(d.keys())
d['_links']['self']['href']
```
%% Cell type:code id:f772e9b1 tags:
``` python
# %matplotlib notebook
# reload(oda_integral_wrapper.wrapper)
# wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private', )
# observations[1]['title']
# wrap.plot_image(observations[1]['isgri_raw_mosaic'].mosaic_image_0_mosaic.data_unit[4],
# observations[1]['isgri_sources'])
```
%% Cell type:code id:2f2f41f5 tags:outputs
``` python
# Cell tagged "outputs" — NOTE(review): presumably read back by the batch
# driver as a completion flag for this notebook; confirm against the batch wrapper.
status = 1
```
......
%% Cell type:code id:f42afc40 tags:parameters
``` python
import os,sys
# Papermill "parameters" cell: every assignment below can be overridden by the
# batch driver at execution time.
rev_num = 0  # INTEGRAL revolution number; <= 0 selects the current revolution (+ offset)
use_isgri = True  # extract ISGRI spectra
use_jemx1 = True  # extract JEM-X1 spectra
use_jemx2 = True  # extract JEM-X2 spectra (forced off for NRT data below)
E1_keV = "28.0"  # ISGRI band lower bound [keV]
E2_keV = "40.0"  # ISGRI band upper bound [keV]
J_E1_keV = "3.0"  # JEM-X band lower bound [keV]
J_E2_keV = "20.0"  # JEM-X band upper bound [keV]
osa_version = 'OSA11.2'
detection_threshold = 7
host_type = 'staging'  # MMODA dispatcher/gallery host to target
data_version = 'CONS'  # 'CONS' (consolidated) or 'NRT' (near-real-time)
output_yaml_dir = 'rev' # A folder to store the yaml file
isgri_grouping = [18,150,-30]  # spectral grouping for write_all_spectra_fits_files — presumably (first chan, last chan, rebin); confirm in wrapper docs
isgri_systematic_fraction = 0.015  # systematic error fraction applied to ISGRI spectra
jemx_systematic_fraction = 0.05  # systematic error fraction applied to JEM-X spectra
token=''  # MMODA token; auto-discovered in a later cell when empty
batch_run = False  # True when driven by the batch wrapper (shifts parameters to cell _i2)
notebooks_folder = os.getcwd()
```
%% Cell type:code id:f2f901c0 tags:
``` python
import sys
import oda_integral_wrapper.wrapper
from oda_api.plot_tools import OdaImage
```
%% Cell type:code id:d2508e02 tags:
``` python
import oda_integral_wrapper.itime as itime
# rev_num <= 0 means "current revolution + rev_num": resolve it from the
# INTEGRAL clock and force near-real-time (NRT) data, where JEM-X2 is not used.
if rev_num <= 0:
    now = itime.now()
    # NOTE(fix): keep rev_num as a zero-padded *string* ('0046'), exactly like
    # the else branch below; the previous int() conversion dropped the padding
    # and broke names built as 'rev_%s.yaml' % rev_num and the gallery
    # observation title "rev. <NNNN>".
    rev_num = '%04d' % (int(now.REVNUM) + rev_num)
    data_version = 'NRT'
    use_jemx2 = False
    print("We force NRT data and no use of JEM-X2")
else:
    rev_num = '%04d' % int(rev_num)
print("Revolution ", rev_num)
```
%% Cell type:code id:97950e40 tags:
``` python
import logging
#logging.getLogger().setLevel(logging.WARNING)
logging.getLogger().setLevel(logging.INFO) #for more verbose logging
# Attach a StreamHandler to the root logger so records appear in the notebook output.
logging.getLogger('').addHandler(logging.StreamHandler())
```
%% Cell type:code id:0ba01826 tags:
``` python
import oda_api.token
# When no token was passed as a parameter, discover one from the standard
# locations; decode_oda_token inspects it — presumably reporting expiry/roles
# (confirm in oda_api docs).
if token == '':
    token = oda_api.token.discover_token()
oda_api.token.decode_oda_token(token)
```
%% Cell type:code id:03b2640b tags:
``` python
import yaml
# Retrieve the observation list for this revolution: first from the YAML
# attached to the gallery observation record, then from the local file as a
# fallback.
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private',
                                                  host_type=host_type)
observation_title="rev. " + str(rev_num)
yaml_file_path = os.path.join('rev', 'rev_%s.yaml' % rev_num)
try:
    output_get = wrap.disp.get_yaml_files_observation_with_title(observation_title=observation_title, token=token)
    observations = yaml.safe_load(output_get['file_content'])
    if output_get['file_content'] == '':
        # The observation exists but carries no YAML: fall back to the local file.
        # NOTE(fix): narrowed from bare `except:` clauses, which also swallowed
        # KeyboardInterrupt/SystemExit.
        try:
            with open(yaml_file_path) as file:
                observations = yaml.load(file, Loader=yaml.FullLoader)
        except Exception:
            # This raise is intercepted by the outer handler below, which makes
            # one more attempt at the local file before giving up.
            raise Exception('The file rev_%s.yaml must exist to process the spectra' % rev_num)
except Exception:
    try:
        with open(yaml_file_path) as file:
            observations = yaml.load(file, Loader=yaml.FullLoader)
    except Exception:
        raise Exception('The file rev_%s.yaml must be available to process the spectra' % rev_num)
```
%% Cell type:code id:47014b40 tags:
``` python
# Normalise every science-window id to the canonical zero-padded
# '#############.###' form (16 characters, 3 decimals) before submission.
for source in observations:
    formatted = ["%016.3f" % (float(scw)) for scw in source['scw_list']]
    source['scw_list'] = formatted
    print(source['title'], formatted)
#print(observations)
```
%% Cell type:code id:f02e1c5b tags:
``` python
import astropy.io.fits as fits
import numpy as np
from astroquery.simbad import Simbad
from astropy import units as u
from astropy.coordinates import SkyCoord
import copy
from importlib import reload
import json
```
%% Cell type:code id:8b5d064b tags:
``` python
## It does not work from papermill
# import ipynbname
# nb_fname = ipynbname.name()
nb_fname="Generic Revolution Spectra.ipynb"
from git import Repo
try:
    # Derive the public URL of this notebook from the git remote of the
    # enclosing repository, rewriting ssh-style remotes to https.
    repo = Repo(notebooks_folder)
    repo_name = repo.remotes.origin.url.split('.git')[0]
    origin_notebook = repo_name.replace(':','/').replace('git@', 'https://') + \
                      '/-/blob/master/' + nb_fname
except Exception:
    # NOTE(fix): narrowed from a bare `except:`. Fall back to the canonical
    # gallery-notebooks location when no usable git metadata is available.
    origin_notebook = 'https://gitlab.astro.unige.ch/oda/product-gallery/gallery-notebooks/-/blob/master/' + nb_fname
#origin_notebook = "<a href=\"%s\" target=\"blank\">%s</a>" %(origin_notebook, nb_fname.replace('%20', ' '))
print(origin_notebook)
```
%% Cell type:code id:bc873b69 tags:
``` python
# Fresh wrapper for the extraction calls below (no host_type argument here:
# the wrapper's default dispatcher host is used).
reload(oda_integral_wrapper.wrapper)
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private', )
```
%% Cell type:code id:055d4bf6 tags:
``` python
looping = True
import time
# This looping allows us to submit all jobs at once: each pass submits every
# pending spectrum request with wait=False, then we poll (sleeping 2 minutes
# between passes, up to max_loops passes) until every request has either
# returned data or been marked 'Error'.
n_loops=0
max_loops=200
while looping:
    for source in observations:
        if 'processed_mosaics' in source and source['processed_mosaics']:
            scw_list=source['scw_list']
            if use_isgri:
                if source.get('isgri_spectra', None) is None:
                    # An empty source catalog means there is nothing to extract.
                    if (source.get('isgri_source_catalog', None) is None) or \
                       (len(json.loads(source['isgri_source_catalog'])['cat_column_list'][1]) == 0):
                        print('ISGRI spectra for %s are not extracted because no sources are present' % source['title'])
                        source.update({'isgri_spectra' : 'Error'})
                    else:
                        par_dict = {
                            'instrument': 'isgri',
                            'product': 'isgri_spectrum',
                            'E1_keV' : E1_keV,
                            'E2_keV' : E2_keV,
                            'osa_version' : osa_version,
                            'product_type' : 'Real',
                            'src_name' : source['title'],
                            'RA':source['RA'],
                            'DEC' : source['Dec'],
                            'selected_catalog': source['isgri_source_catalog']
                        }
                        # NOTE(fix): this previously tested 'T1' and called the
                        # undefined name `pardict` (NameError whenever the key
                        # was present). Align with the JEM-X branches: key off
                        # 'tstart' and update par_dict.
                        if 'tstart' in source.keys():
                            par_dict.update({
                                'T1' : source['tstart'],
                                'T2' : source['tstop']
                            })
                        try:
                            data=wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120,
                                                         save_partial_products=False,
                                                         wait=False, **par_dict)
                            source.update({'isgri_spectra' : data})
                        except Exception:
                            print('ISGRI spectra for %s failed' % source['title'])
                            source.update({'isgri_spectra' : 'Error'})
            if use_jemx1:
                if source.get('jemx1_spectra', None) is None:
                    if source.get('jemx1_source_catalog',None) is None or \
                       len(json.loads(source['jemx1_source_catalog'])['cat_column_list'][1]) == 0 :
                        print('JEMX1 spectra for %s are not extracted because no sources are present' % source['title'])
                        source.update({'jemx1_spectra' : 'Error'})
                    else:
                        par_dict_j = { 'instrument' : 'jemx',
                                       'product' : 'jemx_spectrum',
                                       'jemx_num' : 1,
                                       'E1_keV' : J_E1_keV,
                                       'E2_keV' : J_E2_keV,
                                       'osa_version' : osa_version,
                                       'product_type' : 'Real',
                                       'src_name' : source['title'],
                                       'RA' : source['RA'],
                                       'DEC' : source['Dec'],
                                       'selected_catalog': source['jemx1_source_catalog']
                                     }
                        if 'tstart' in source.keys():
                            par_dict_j.update({ 'T1' : source['tstart'],
                                                'T2' : source['tstop'] })
                        try:
                            data=wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120, save_partial_products=False,
                                                         wait=False, **par_dict_j )
                            source.update({'jemx1_spectra' : data})
                        except Exception:
                            print('JEM-X1 spectra for %s failed' % source['title'])
                            source.update({'jemx1_spectra' : 'Error'})
            if use_jemx2:
                if source.get('jemx2_spectra', None) is None:
                    if source.get('jemx2_source_catalog', None) is None or \
                       len(json.loads(source['jemx2_source_catalog'])['cat_column_list'][1]) == 0 :
                        print('JEMX2 spectra for %s are not extracted because no sources are present' % source['title'])
                        source.update({'jemx2_spectra' : 'Error'})
                    else:
                        par_dict_j = { 'instrument' : 'jemx',
                                       'product' : 'jemx_spectrum',
                                       'jemx_num' : 2,
                                       'E1_keV' : J_E1_keV,
                                       'E2_keV' : J_E2_keV,
                                       'osa_version' : osa_version,
                                       'product_type' : 'Real',
                                       'src_name' : source['title'],
                                       'RA' : source['RA'],
                                       'DEC' : source['Dec'],
                                       'selected_catalog': source['jemx2_source_catalog']
                                     }
                        if 'tstart' in source.keys():
                            par_dict_j.update({ 'T1' : source['tstart'],
                                                'T2' : source['tstop'] })
                        try:
                            data=wrap.long_scw_list_call(scw_list, s_max=500, sleep_time=120,
                                                         save_partial_products=False,
                                                         wait=False, **par_dict_j )
                            source.update({'jemx2_spectra' : data})
                        except Exception:
                            print('JEM-X2 spectra for %s failed' % source['title'])
                            source.update({'jemx2_spectra' : 'Error'})
        else:
            # Mosaics failed for this field: mark all spectra as errored so the
            # polling below does not wait for them.
            source.update({'isgri_spectra' : 'Error'})
            source.update({'jemx1_spectra' : 'Error'})
            source.update({'jemx2_spectra' : 'Error'})
    #Checks if I need to ask again
    looping=False
    for ii in observations:
        if use_isgri:
            if ii.get('isgri_spectra',None) is None:
                print('Need to loop again on isgri for %s ' % ( ii['title']))
                looping = True
        if use_jemx1:
            if ii.get('jemx1_spectra',None) is None:
                print('Need to loop again on jemx1 for %s ' % ( ii['title']))
                looping = True
        if use_jemx2:
            if ii.get('jemx2_spectra',None) is None:
                print('Need to loop again on jemx2 for %s ' % ( ii['title']))
                looping = True
    if looping and n_loops < max_loops:
        time.sleep(120)
        n_loops+=1
    else:
        looping=False
```
%% Cell type:code id:783a1be1 tags:
``` python
import re
reload(oda_integral_wrapper.wrapper)
wrap=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, integral_data_rights='all-private', )
# Write the retrieved spectra to FITS files (one set per source) and record the
# file names and time/exposure bookkeeping on each observation entry.
for source in observations:
    # NOTE(fix): the former ad-hoc `re.sub(r'[\W]+', '_', ...)` sanitisation was
    # dead code (its result was immediately overwritten); the wrapper's
    # dedicated clean_source_title helper is the single source of truth.
    sanitized_source_title = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_source_title(source['title'])
    # File-name stem: <sanitised title>_<exposure id>.
    pattern = sanitized_source_title + '_' + str(source['expid'])
    if use_isgri:
        spectra_isgri=source.get('isgri_spectra', None)
        # Skip when extraction never ran (None) or failed ('Error' string).
        if spectra_isgri is not None and type(spectra_isgri) is not str:
            isgri_sources, isgri_specs, isgri_tstarts, isgri_tstops, isgri_exposures = \
                wrap.write_all_spectra_fits_files(
                    spectra_isgri, pattern, grouping=isgri_grouping,
                    systematic_fraction=isgri_systematic_fraction)
            source.update({'isgri_files' : {'fname': isgri_specs, 'tstart':isgri_tstarts,
                                            'tstop': isgri_tstops,
                                            'exposure': isgri_exposures},
                           'isgri_sources': isgri_sources})
    if use_jemx1:
        spectra_jemx1 = source.get('jemx1_spectra', None)
        if spectra_jemx1 is not None and type(spectra_jemx1) is not str:
            jemx1_sources, jemx1_specs, jemx1_tstarts, jemx1_tstops, jemx1_exposures = \
                wrap.write_all_spectra_fits_files(
                    spectra_jemx1, pattern,
                    systematic_fraction=jemx_systematic_fraction)
            source.update({'jemx1_files' : {'fname': jemx1_specs, 'tstart':jemx1_tstarts,
                                            'tstop': jemx1_tstops,
                                            'exposure': jemx1_exposures},
                           'jemx1_sources': jemx1_sources})
    if use_jemx2:
        spectra_jemx2 = source.get('jemx2_spectra', None)
        if spectra_jemx2 is not None and type(spectra_jemx2) is not str:
            jemx2_sources, jemx2_specs, jemx2_tstarts, jemx2_tstops, jemx2_exposures = \
                wrap.write_all_spectra_fits_files(
                    spectra_jemx2, pattern,
                    systematic_fraction=jemx_systematic_fraction)
            source.update({'jemx2_files' : {'fname': jemx2_specs, 'tstart':jemx2_tstarts,
                                            'tstop': jemx2_tstops,
                                            'exposure': jemx2_exposures},
                           'jemx2_sources': jemx2_sources})
```
%% Cell type:code id:3a980fda tags:
``` python
# Upload spectrum to Gallery
# Post one gallery product per (observation, instrument, source) with the plot,
# the FITS spectrum + rmf/arf, and the scrubbed input parameters.
import oda_api.plot_tools
import re
reload(oda_api.plot_tools)
reload(oda_integral_wrapper.wrapper)
wrap2=oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
instruments = []
if use_isgri:
    instruments.append('isgri')
if use_jemx1:
    instruments.append('jemx1')
if use_jemx2:
    instruments.append('jemx2')
# _i1/_i2 are IPython's recorded cell sources (parameters cell is #2 in batch runs).
if batch_run:
    additional_information = _i2
else:
    additional_information = _i1
# remove any token from the additional_information
token_pattern = r"token\s*=\s*[\'\"].*?[\'\"]"
# NOTE(fix): typo in the replacement text ("yout" -> "your").
additional_information = re.sub(token_pattern, 'token = \"<Insert your token here>\"', additional_information, flags=re.DOTALL)
host_type_pattern = r"host_type\s*=\s*[\'\"].*?[\'\"]"
additional_information = re.sub(host_type_pattern, 'host_type = \"production\"', additional_information, flags=re.DOTALL)
notebooks_folder_pattern = r"notebooks_folder\s*=\s*[\'\"].*?[\'\"]\n"
additional_information = re.sub(notebooks_folder_pattern, '', additional_information, flags=re.DOTALL)
for obs in observations:
    print(obs)
    for ins in instruments:
        dict_ins_key = '%s_spectra' % ins
        spectra = obs.get(dict_ins_key, None)
        if spectra is not None and type(spectra) is not str:
            sources = obs['%s_sources' % ins]
            image_product = oda_api.plot_tools.OdaSpectrum(spectra)
            systematic_fraction = isgri_systematic_fraction
            xlim = [20,200]
            if 'jemx' in ins:
                systematic_fraction = jemx_systematic_fraction
                xlim = [2,30]
            # NOTE(fix): use *local* names for the displayed energy range; the
            # previous code assigned the module-level E1_keV/E2_keV here,
            # clobbering the input parameters for every later cell (including
            # the final yaml dump of E1_isgri/E2_isgri).
            e1_plot = 1
            e2_plot = 100
            k1 = 'E1_isgri'
            k2 = 'E2_isgri'
            if 'jemx' in ins:
                k1 = 'E1_jemx'
                k2 = 'E2_jemx'
            e1_plot = obs.get(k1, e1_plot)
            e2_plot = obs.get(k2, e2_plot)
            print(f"E1_kev: {e1_plot}")
            print(f"E2_keV: {e2_plot}")
            print(f"k1: {k1}")
            print(f"k2: {k2}")
            for i,src in enumerate(sources):
                if not os.path.isdir('out'):
                    os.mkdir('out')
                img_fn = image_product.get_image_for_gallery( in_source_name=src,
                                                              systematic_fraction=systematic_fraction,
                                                              xlim=xlim,
                                                              output_folder='out')
                # Unique, reproducible product id from source/obsid/instrument/revolution.
                par_dict_product_id = {
                    'source_name': src,
                    'obsid': obs['obsid'],
                    'instrument' : '%s' % ins,
                    'product_type' : '%s_spectrum' % ins,
                    "rev_num" : rev_num
                }
                product_id = oda_api.api.DispatcherAPI.calculate_param_dict_id(par_dict_product_id)
                spec = obs['%s_files' % ins]['fname'][i]
                nrt_string = ''
                if data_version.upper() == 'NRT':
                    nrt_string = ' (NRT)'
                par_dict={
                    'token': token,
                    # Strip fractional seconds from the UTC stamps (raw strings for regex).
                    'T1': re.sub(r'\.\d{3}', '', obs['tstart']),
                    'T2': re.sub(r'\.\d{3}', '', obs['tstop']),
                    'e1_kev' : e1_plot,
                    'e2_kev' : e2_plot,
                    'product_title' : src + ' %s spectrum' % ins + nrt_string,
                    'gallery_image_path' : img_fn,
                    'fits_file_path' : [spec, spec.replace('spectrum', 'rmf'),
                                        spec.replace('spectrum', 'arf')],
                    'src_name' : src,
                    'instrument' : ins,
                    'insert_new_source' : True,
                    'force_insert_not_valid_new_source' : False,
                    'validate_source' : True,
                    'apply_fields_source_resolution': True,
                    'product_type' : '%s_spectrum' % ins ,
                    'product_id' : product_id,
                    #input parameters assuming they are in cell #1
                    'additional_information' : additional_information,
                    'html_image': image_product.get_html_image(src,
                                                               systematic_fraction,
                                                               x_range = xlim),
                    'produced_by' : origin_notebook
                }
                # Retry the post up to n_max_tries times on errors/exceptions.
                n_max_tries = 3
                n_tries_left = n_max_tries
                while True:
                    try:
                        d = wrap2.disp.post_data_product_to_gallery(**par_dict)
                        # NOTE(fix): record the outcome on the observation being
                        # processed (`obs`); the previous code updated `source`,
                        # a stale loop variable left over from an earlier cell.
                        obs.update({
                            "%s_gallery_object" % ins : d.copy(),
                        })
                        if 'error_message' not in d:
                            obs.update({
                                'processed_spectra': True
                            })
                            break
                        else:
                            n_tries_left -= 1
                            if n_tries_left == 0:
                                break
                            else:
                                print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
                    except Exception as e:
                        print(f"Exception while posting a product on the gallery, will re-attempt to post:\n{e}")
                        n_tries_left -= 1
                        if n_tries_left == 0:
                            break
                        else:
                            print(f"Exception while posting a product on the gallery, will re-attempt to post {n_tries_left} times")
    print(obs)
```
%% Cell type:code id:5e8d334b tags:output
``` python
# Cell tagged "output" — NOTE(review): the self-assignment looks like a papermill
# idiom to expose `observations` to the batch driver; confirm against the batch wrapper.
observations = observations
```
%% Cell type:code id:43e5b419 tags:
``` python
# Final persistence pass: clean the observation list, record the energy ranges
# used, dump everything to <output_yaml_dir>/rev_<NNNN>.yaml and read it back.
if not os.path.isdir(output_yaml_dir):
    os.mkdir(output_yaml_dir)
yaml_file_path = os.path.join(output_yaml_dir, 'rev_%s.yaml' % rev_num)
energy_ranges = {'E1_isgri': E1_keV,
                 'E2_isgri': E2_keV,
                 'E1_jemx': J_E1_keV,
                 'E2_jemx': J_E2_keV}
copied_observations = oda_integral_wrapper.wrapper.INTEGRALwrapper.clean_and_update_observations(
    observations, dictionary_to_update=energy_ranges)
observations = copied_observations
oda_integral_wrapper.wrapper.dump_yaml(copied_observations, yaml_file_path)
with open(yaml_file_path) as fh:
    observations = yaml.load(fh, Loader=yaml.FullLoader)
print(observations)
```
%% Cell type:code id:71a172ba tags:
``` python
# Resolve the revolution's UTC start/stop from its number; time_dict['IJD'] is a
# whitespace-separated string whose fields 1 and 2 are the start/stop in IJD.
time_dict = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('REVNUM', rev_num, 'ANY')
utc_start_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[1],
                                                                         'UTC')
# NOTE(fix): raw strings for the regex — '\.' and '\d' in a plain literal are
# invalid escape sequences (SyntaxWarning on recent Pythons).
utc_start_rev = re.sub(r'\.\d{3}', '', utc_start_rev)
utc_end_rev = oda_integral_wrapper.wrapper.INTEGRALwrapper.converttime('IJD', time_dict['IJD'].split()[2],
                                                                       'UTC')
utc_end_rev = re.sub(r'\.\d{3}', '', utc_end_rev)
```
%% Cell type:code id:c67d0b1f tags:
``` python
reload(oda_integral_wrapper.wrapper)
# Update the gallery observation record for this revolution with the final YAML
# (now including the spectra bookkeeping) and the complete obsid list.
obs_ids = [entry['obsid'] for entry in observations]
par_dict_gallery = {
    'observation_title': "rev. %s" % rev_num,
    'T1': utc_start_rev,
    'T2': utc_end_rev,
    'yaml_file_path': yaml_file_path,
    'obsid': obs_ids,
    'token': token,
    'create_new': True,
}
wrap2 = oda_integral_wrapper.wrapper.INTEGRALwrapper(token=token, host_type=host_type)
d = wrap2.disp.update_observation_with_title(**par_dict_gallery)
```
......
......@@ -15,7 +15,6 @@ GitPython==3.1.27
#cdci_data_analysis
-e git+https://github.com/oda-hub/dispatcher-app.git#egg=cdci_data_analysis
#oda_api from a certain branch
-e git+https://github.com/oda-hub/oda_api.git#egg=oda_api
-e git+https://github.com/oda-hub/oda_api.git@decode-response-after-checking-code#egg=oda_api
#oda-integral-wrapper
-e git+https://gitlab.astro.unige.ch/oda/api-clients/oda_api_wrapper#egg=oda-integral-wrapper
-e git+https://github.com/oda-hub/renku-aqs.git#egg=renku_aqs
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment