def get_fits_info_by_calibration_id(calibration_id):
    """Summarize the FITS file produced for one calibration run.

    Looks up the FITS-file record linked to *calibration_id* and flattens
    the fields of interest into a single summary dict.

    Parameters
    ----------
    calibration_id : calibration run identifier used for the lookup.

    Returns
    -------
    str
        JSON-encoded one-element list on success, or an empty JSON list
        when the run has no associated FITS file.
    """
    auth.log_visit()
    records = list(STIX_MDB.get_calibration_run_fits_info(calibration_id))
    if not records:
        return json_util.dumps([])
    info = records[0]
    summary = {
        'calibration_run_id': calibration_id,
        'raw_file_id': info['file_id'],
        'fits_filename': info['filename'],
        'fits_file_id': info['_id'],
        'packet_start_id': info['packet_id_start'],
        'packet_end_id': info['packet_id_end'],
        'is_complete': info['complete'],
        'meas_start_utc': stix_datetime.unix2utc(info['data_start_unix']),
        'meas_end_utc': stix_datetime.unix2utc(info['data_end_unix']),
        'duration_seconds': info['data_end_unix'] - info['data_start_unix'],
        'fits_creation_time': info['creation_time'],
    }
    return json_util.dumps([summary])
def parse(cls, packets, dlt=0):
    """Decode quick-look light-curve (QLLC) telemetry packets.

    Iterates over *packets*, keeps only packets whose SPID matches
    QLLC_SPID, and accumulates per-energy-channel light curves together
    with the per-sample unix timestamps derived from the packet SCET.

    Parameters:
        packets: iterable of raw packet documents accepted by sdt.Packet.
        dlt: time offset in seconds added to every sample time
            (presumably a light-travel-time correction — TODO confirm).

    Returns:
        dict with 'unix_time', 'light_curves' (channel index -> counts),
        'energy_bins', 'num', start/end unix and UTC times; or a dict
        {'error': ...} when no usable data is found.
    """
    if not packets:
        return {'error': 'Data not available!'}
    lightcurves = {}
    unix_time = []
    energy_bins = {}
    last_time = 0
    for pkt in packets:
        packet = sdt.Packet(pkt)
        if not packet.isa(QLLC_SPID):
            # not a quick-look light-curve packet
            continue
        # SCET = coarse seconds + fine fraction (16-bit, so /65536)
        scet_coarse = packet[1].raw
        scet_fine = packet[2].raw
        start_scet = scet_coarse + scet_fine / 65536.
        if start_scet <= last_time:
            # skip duplicated / out-of-order packets
            continue
        last_time = start_scet
        # integration duration is stored in units of 0.1 s, offset by one
        int_duration = (packet[3].raw + 1) * 0.1
        detector_mask = packet[4].raw
        pixel_mask = packet[6].raw
        num_lc = packet[17].raw
        compression_s = packet[8].raw
        compression_k = packet[9].raw
        compression_m = packet[10].raw
        if not energy_bins:
            # energy bin definition taken from the first accepted packet
            energy_bin_mask = packet[16].raw
            energy_bins = get_energy_bins(energy_bin_mask)
        num_lc_points = packet.get('NIX00270/NIX00271')[0]
        lc = packet.get('NIX00270/NIX00271/*.eng')[0]
        rcr = packet.get('NIX00275/*.raw')
        UTC = packet['header']['UTC']
        # append this packet's samples to the per-channel light curves
        for i in range(len(lc)):
            if i not in lightcurves:
                lightcurves[i] = []
            lightcurves[i].extend(lc[i])
        # one timestamp per sample, spaced by the integration duration
        unix_time.extend([
            stix_datetime.scet2unix(start_scet + dlt + x * int_duration)
            for x in range(num_lc_points[0])
        ])
    if not lightcurves:
        return {'error': 'Data not available!'}
    return {
        'unix_time': unix_time,
        'light_curves': {x: lightcurves[x] for x in lightcurves},
        'energy_bins': energy_bins,
        'num': len(unix_time),
        'start_unix': unix_time[0],
        'start_utc': stix_datetime.unix2utc(unix_time[0]),
        'end_unix': unix_time[-1],
        'end_utc': stix_datetime.unix2utc(unix_time[-1])
    }
def create_l1_request(doc):
    """Create an L1 (pixel data) request template for one flare document.

    Small flares (peak below the configured threshold) are requested as a
    single time bin covering the 70%-of-peak interval when available;
    larger flares get configurable time margins and an energy range
    derived from the light-curve upper limit.

    Parameters:
        doc: flare document with 'flare_id', '_id', 'start_unix',
            'duration', 'run_id', 'total_signal_counts', 'peak_counts'
            and optionally 'PH70_unix'.

    Returns:
        The form produced by create_template(), or None when the flare is
        too small for imaging.
    """
    flare_id = doc['flare_id']
    flare_entry_id = doc['_id']
    start_utc = stix_datetime.unix2utc(doc['start_unix'])
    duration = int(doc['duration'])
    flare_entry_ids = doc['_id']
    run_ids = doc['run_id']
    if doc['total_signal_counts'] < IMAGING_MIN_COUNTS:
        # don't create L1 requests for small flares
        return None
    if doc['peak_counts'] < conf['L1']['flare_min_peak_counts']:
        # request L1 for small flares: only one time bin
        left_time_margin = 0
        right_time_margin = 0
        try:
            # unix times at 70 percent of the maximum
            start_unix = doc['PH70_unix'][0]
            end_unix = doc['PH70_unix'][1]
            duration = int(end_unix - start_unix)
            start_utc = stix_datetime.unix2utc(start_unix)
        except (KeyError, IndexError):
            # IndexError also covered: a malformed/short PH70_unix list
            # should fall back to H90 rather than crash.
            # (message wording fixed: was "Not PH70 data found")
            msg.append(
                f'No PH70 data found for flare #{flare_entry_id}, using H90 instead.'
            )
        emax = 13
        tunit = duration
    else:
        left_time_margin = conf['L1']['time_margin'][0]
        right_time_margin = conf['L1']['time_margin'][1]
        tunit = 10
        ilc = get_energy_range_upper_limit(doc)
        emax = EMAX_MAP[ilc]
        if emax > 17:
            # cap the upper energy bin at 17
            emax = 17
            msg.append(
                f'Flare # {flare_id} Max energy bin changed to {emax} keV\n')
    return create_template(flare_id,
                           flare_entry_ids,
                           run_ids,
                           start_utc,
                           duration,
                           emax,
                           left_time_margin,
                           right_time_margin,
                           tunit=tunit,
                           level=1)
def create_backgroun_data_requests(start_date=None):
    """Create L1 background data-request forms for eligible quiet periods.

    NOTE: the function name typo ("backgroun") is kept intentionally so
    existing callers keep working.

    Parameters:
        start_date: optional search start passed through to
            bkg_req.get_background_request_time_ranges().

    Returns:
        list of request forms produced by create_template().
    """
    time_ranges, msg = bkg_req.get_background_request_time_ranges(
        start_date=start_date)
    print(msg)
    print('Background requests to be created:')
    print(time_ranges)
    created = []
    for begin_unix, span in time_ranges:
        begin_utc = stix_datetime.unix2utc(begin_unix)
        print("Creating data request for:")
        print("Start time:", begin_utc, "Duration:", span)
        # background requests carry no flare/run association, hence the
        # three leading None arguments
        template = create_template(
            None,
            None,
            None,
            begin_utc,
            span,
            emax=31,
            left_margin=0,
            right_margin=0,
            tunit=span,
            level=1,
            time_tag=0,
            subject='L1 BKG',
            purpose='Background',
        )
        print(template)
        created.append(template)
    return created
def get_calibration_run_elut(utc):
    """Fetch the most recent analyzed calibration-run ELUT valid at *utc*.

    Selects the latest calibration run that started before *utc*, has an
    analysis report and exceeds the minimum duration.

    Parameters:
        utc: UTC time string; converted to unix time for the query.

    Returns:
        dict with rounded slopes/offsets (and their errors), run metadata
        and start time; empty dict when no qualifying run exists.
    """
    unix = sdt.utc2unix(utc)
    query = {
        'start_unix_time': {
            '$lte': unix
        },
        'analysis_report': {
            '$exists': True
        },
        'duration': {
            '$gt': MIN_CALIBRATION_DURATION
        },
    }
    # newest qualifying run only
    matches = list(caldb.find(query).sort('start_unix_time', -1).limit(1))
    if not matches:
        return {}
    doc = matches[0]
    report = doc['analysis_report']
    return {
        'slopes': np.round(report['slope'], 4),
        'offsets': np.round(report['offset'], 4),
        'slope_errors': np.round(report['slope_error'], 4),
        'offset_errors': np.round(report['offset_error'], 4),
        'run_id': doc['_id'],
        'duration': doc['duration'],
        'start_unix_time': doc['start_unix_time'],
        'start_utc': sdt.unix2utc(doc['start_unix_time'])
    }
def form_bsd_request_sequence(uid,
                              start_unix,
                              level,
                              detector_mask,
                              tmin,
                              duration,
                              tbin,
                              emin,
                              emax,
                              eunit,
                              pixel_mask=0xfff,
                              action_time='00:00:10'):
    """Build an AIXF417A telecommand structure for a BSD data request.

    Level-5 (spectrogram) requests override several inputs with fixed
    settings before the parameter list is assembled.

    Returns:
        dict with command name, action time, uid, estimated data volumes
        and the XF417Axx parameter list.
    """
    if level == 5:
        # spectrogram requests use a fixed configuration
        action_time = '00:00:10'
        eunit = 1
        detector_mask = 0xFFFFFFFF
        emin = 0
        emax = 2
    start_obt = int(stix_datetime.unix2scet(start_unix))
    num_ebins = (emax - emin + 1) / eunit
    start_utc = stix_datetime.unix2utc(start_unix)
    data_volume, data_volume_upper_limit, _, _ = sci_volume.estimate_volume(
        start_utc, duration, tbin, num_ebins, detector_mask, pixel_mask,
        level)
    # tmax is in 0.1 s units for both levels
    tmax = int(duration * 10)
    if level == 5:
        tunit = int(tbin)
    else:
        tunit = int(tbin * 10)
        # onboard convention: eunit is zero-based for non-spectrogram levels
        eunit = eunit - 1
    parameters = [
        ['XF417A01', uid],
        ['XF417A02', level],
        ['XF417A03', start_obt],
        ['XF417A04', 0],  # subseconds
        ['XF417A05', "0x%X" % detector_mask],
        ['XF417A06', tmin],
        ['XF417A07', tmax],
        ['XF417A08', tunit],
        ['XF417A09', emin],
        ['XF417A10', emax],
        ['XF417A11', eunit],
    ]
    return {
        'name': 'AIXF417A',
        'actionTime': action_time,
        'uid': uid,
        'data_volume': data_volume,
        'data_volume_upper_limit': data_volume_upper_limit,
        'parameters': parameters
    }
def query_fits_by_tw(utc_begin, utc_end, product_type):
    """List FITS files overlapping a UTC time window for a product type.

    Rejects windows longer than MAX_FITS_QUERY_SPAN. Each result row
    carries a download URL, the observation time range, creation time and
    the FITS entry id.

    Returns:
        JSON string: list of file descriptors, or {'error': ...}.
    """
    auth.log_visit()
    try:
        types = get_product_types(product_type)
        if not types:
            result = {'error': 'Invalid product filter!'}
        else:
            start_unix = parse(utc_begin).timestamp()
            end_unix = parse(utc_end).timestamp()
            if end_unix - start_unix > MAX_FITS_QUERY_SPAN:
                return json_util.dumps({
                    'error':
                    f'Time span not satisfiable. Time span must be < {MAX_FITS_QUERY_SPAN/86400.} days'
                })
            rows = STIX_MDB.get_fits_info_by_time_range(
                start_unix,
                end_unix,
                product_groups=types[0],
                product_types=types[1],
                complete='any')
            result = []
            for row in rows:
                # fall back to the raw value if it can't be formatted
                try:
                    creation_time = stix_datetime.format_datetime(
                        row['creation_time'])
                except Exception:
                    creation_time = row['creation_time']
                result.append({
                    'url':
                    '{}download/fits/filename/{}'.format(
                        request.host_url, row['filename']),
                    'observation_time_range': [
                        stix_datetime.unix2utc(row['data_start_unix']),
                        stix_datetime.unix2utc(row['data_end_unix'])
                    ],
                    'creation_time': creation_time,
                    'fits_id': row['_id']
                })
    except Exception as e:
        result = {'error': str(e)}
    return json_util.dumps(result)
def request_calibration_30kev_peak_resolution():
    """Per-pixel energy resolution (%) at the 30.85 keV calibration line.

    Reads 'start_unix'/'end_unix' from the HTTP request (0/0 means the
    last 15 days) and, for every calibration run with fit parameters in
    that window, converts the fitted peak width into a relative
    resolution using the run's slope.

    Returns:
        JSON string with 'time', 'resolution' (pixels x runs), 'energy',
        'run_ids' and 'pixel_ids'; or {'error': ...} on failure.
    """
    try:
        start_unix = float(request.values['start_unix'])
        end_unix = float(request.values['end_unix'])
        if start_unix == 0 and end_unix == 0:
            # default window: the last 15 days
            end_unix = time.time()
            start_unix = end_unix - 15 * 86400
        col_db = STIX_MDB.get_collection('calibration_runs')
        energy = 30.85
        res_time = []
        res_sigma = []
        run_ids = []
        runs = col_db.find({
            'start_unix_time': {
                '$gte': start_unix,
                '$lt': end_unix,
            },
            'analysis_report.fit_parameters': {
                '$exists': True
            }
        })
        for run in runs:
            pixel_res = np.zeros(384)
            for pixel in run['analysis_report']['fit_parameters']:
                # best-effort: skip pixels with incomplete fit data
                try:
                    ipx = pixel['detector'] * 12 + pixel['pixel']
                    # sigma (ADC) / (slope * energy) -> relative width in %
                    pixel_res[ipx] = pixel['peaks']['peak1'][2] / (
                        run['analysis_report']['slope'][ipx] * energy) * 100
                except Exception:
                    pass
            res_sigma.append(pixel_res.tolist())
            res_time.append(stix_datetime.unix2utc(run['start_unix_time']))
            run_ids.append(run['_id'])
        data = {
            'time': res_time,
            'resolution': np.array(res_sigma).T.tolist(),
            'energy': energy,
            'run_ids': run_ids,
            'pixel_ids': list(range(384))
        }
    except Exception as e:
        data = {'error': str(e)}
    return json_util.dumps(data)
def get_onboard_elut(utc):
    """Reconstruct the onboard ELUT in effect at *utc*.

    For each of the 384 pixels, takes the most recent slope/offset upload
    executed before *utc*; pixels with no upload keep 0/0.

    Returns:
        dict with 'slopes', 'offsets', the ['earliest', 'latest'] upload
        UTC range across pixels, and the nominal energy bin edges.
    """
    unix = sdt.utc2unix(utc)
    min_time = 5e9
    max_time = 0
    offsets = [0] * 384
    slopes = [0] * 384
    for pixel_id in range(384):
        latest = list(
            scdb.find({
                'pixel_id': pixel_id,
                'type': 'elut',
                'execution_unix': {
                    '$lte': unix
                }
            }).sort('execution_unix', -1).limit(1))
        if not latest:
            continue
        entry = latest[0]
        offsets[pixel_id] = entry['offset']
        slopes[pixel_id] = entry['slope']
        # track the spread of upload times across all pixels
        uptime = entry['execution_unix']
        min_time = min(min_time, uptime)
        max_time = max(max_time, uptime)
    return {
        'slopes': slopes,
        'offsets': offsets,
        'upload_time_range': [sdt.unix2utc(min_time),
                              sdt.unix2utc(max_time)],
        'energy_bin_edges': NOMINAL_EBIN_EDGES,
    }
def create_l4_groups(flare_docs, exclude_existing):
    """Group flare documents into merged L4 (spectrogram) requests.

    Flares closer in time than the configured merging gap (corrected for
    the L4 time margins) are combined into one request covering the whole
    group.

    Parameters:
        flare_docs: time-ordered flare documents, each with 'flare_id',
            'start_unix', 'end_unix', '_id' and 'run_id'.
        exclude_existing: when truthy, skip flares that already have a
            Spectrogram request. Bug fix: this flag was previously
            ignored and existing requests were always skipped; passing a
            falsy value now allows re-requesting.

    Returns:
        list of request forms produced by create_template().
    """
    group = []
    groups = []
    for doc in flare_docs:
        # honor the exclude_existing flag (the check used to be unconditional)
        if exclude_existing and mdb.user_data_request_exists(
                doc['flare_id'], 'Spectrogram'):
            msg.append(f"L4 request for {doc['flare_id']} exists!")
            continue
        if len(group) > 0:
            # close the open group when this flare is too far from the
            # group's first flare
            if abs(doc['end_unix'] - group[0]['start_unix']) >= (
                    conf['L4']['group_max_merging_time_gap'] -
                    (conf['L4']['time_margin'][1] -
                     conf['L4']['time_margin'][0])):
                groups.append(group)
                group = []
        group.append(doc)
    if group:
        groups.append(group)
    msg.append('number of L4 groups: {} \n'.format(len(groups)))
    forms = []
    for gp in groups:
        start_unix = gp[0]['start_unix']
        end_unix = gp[-1]['end_unix']
        flare_ids = [d['flare_id'] for d in gp]
        start_utc = stix_datetime.unix2utc(start_unix)
        duration = end_unix - start_unix
        flare_entry_ids = [x['_id'] for x in gp]
        run_ids = [x['run_id'] for x in gp]
        # the strongest flare in the group decides the energy range
        ilc = max(get_energy_range_upper_limit(d) for d in gp)
        emax = EMAX_MAP[ilc]
        if emax > 17:
            emax = 17
            msg.append(
                f'L4 requests for {flare_ids} Max energy bin changed to {emax} keV\n'
            )
        form = create_template(flare_ids,
                               flare_entry_ids,
                               run_ids,
                               start_utc,
                               duration,
                               emax,
                               left_margin=conf['L4']['time_margin'][0],
                               right_margin=conf['L4']['time_margin'][1],
                               tunit=0.5,
                               level=4)
        forms.append(form)
    return forms
def retrieve_housekeeping_data():
    """Return the housekeeping time series for parameter 54102.

    Reads 'start_unix' and 'duration' from the HTTP request, fetches the
    raw/engineering values for that window and attaches human-readable
    parameter descriptions.

    Returns:
        JSON string with 'eng_values', 'raw_values', 'time' (UTC strings)
        and 'names'; or {'error': ...} on failure.

    Bug fix: the error payload now serializes the exception as str(e) —
    the exception object itself is not JSON-serializable, and the other
    endpoints in this module already use str(e).
    """
    from sdcweb.core.stix_parameters import get_description as gd
    result = {}
    try:
        start_unix = float(request.values['start_unix'])
        duration = float(request.values['duration'])
        data = hkm.request_by_tw(start_unix, duration, [54102])
        utc = [sdt.unix2utc(x) for x in data['time']]
        result['eng_values'] = data['eng']
        result['raw_values'] = data['raw']
        result['time'] = utc
        result['names'] = {}
        for name in data['raw']:
            desc = gd(name)
            if desc:
                result['names'][name] = desc
    except Exception as e:
        # was: {'error': e} — the raw exception object breaks serialization
        result = {'error': str(e)}
    return json_util.dumps(result)
def get_calibration_info_by_fits_id(fits_id):
    """Summarize the calibration run associated with a FITS file.

    Parameters:
        fits_id: FITS file entry identifier.

    Returns:
        JSON string: one-element list with run metadata, the backend's
        error document if it reported one, or an empty list when nothing
        matches.
    """
    auth.log_visit()
    payload = []
    data = STIX_MDB.get_calibration_info_by_fits_id(fits_id)
    if data:
        entry = data[0]
        if 'error' in entry:
            # propagate the backend error document as-is
            payload = entry
        else:
            payload = [{
                'fits_file_id': fits_id,
                'calibration_run_id': entry['_id'],
                'raw_file_id': entry['run_id'],
                'meas_start_utc':
                stix_datetime.unix2utc(entry['start_unix_time']),
                'duration_seconds': entry['duration'],
            }]
    return json_util.dumps(payload)
def get_count_history(runs, emin, emax):
    """Build a count-rate history over [emin, emax] from calibration runs.

    For each run with an analysis report, sums the per-spectrum counts
    inside the energy band (normalized by spectrum size) and divides by
    the run's live time.

    Parameters:
        runs: iterable of calibration-run documents.
        emin, emax: inclusive energy band limits.

    Returns:
        dict with parallel 'time' (UTC) and 'rates' lists, the band
        limits and the total number of runs inspected.
    """
    data = {'time': [], 'rates': [], 'emax': emax, 'emin': emin,
            'num_runs': 0}
    for run in runs:
        data['num_runs'] += 1
        if 'analysis_report' not in run:
            continue
        # live time: stored in milliseconds in auxiliary[4][1]
        duration = float(run['auxiliary'][4][1]) / 1000.
        utc = stix_datetime.unix2utc(run['start_unix_time'])
        report = run['analysis_report']
        if 'sum_spectra' not in report:
            continue
        total_cnts = 0
        for spc in report['sum_spectra'].values():
            ex = np.array(spc[0])
            spectrum = np.array(spc[1])  # differential counts per ADC bin
            in_band = (ex >= emin) & (ex <= emax)
            total_cnts += np.sum(spectrum[in_band]) / spectrum.size
        if total_cnts > 0:
            data['time'].append(utc)
            data['rates'].append(total_cnts / duration)
    return data
def create_template(
    flare_ids,
    flare_entry_ids,
    run_ids,
    start_utc,
    duration,
    emax=13,
    left_margin=0,
    right_margin=0,
    tunit=1,
    level=1,
    time_tag=0,
    subject=None,
    purpose=None,
):
    """Build, store and return a bulk-science-data request form.

    Inserts the form into the bsd_form collection and links the request
    back to each flare document via its 'data_requests' array.

    Parameters:
        flare_ids: single flare id, list of ids, or None (background).
        flare_entry_ids: flare document _id(s) to link the request to.
        run_ids: raw file run id(s) recorded in the form.
        start_utc: request start time (UTC string); shifted by left_margin.
        duration: request duration in seconds (margins applied below).
        emax: upper energy bin.
        left_margin / right_margin: seconds added around the time range.
        tunit: time bin.
        level: data level; only 1 and 4 are supported.
        time_tag / subject / purpose: metadata stored in the form.

    Returns:
        The inserted form dict, or None for an unsupported level.
    """
    level_name = DATA_LEVEL_NAMES[level]
    # NOTE(review): when a request for this flare already exists, this
    # only appends a warning and still proceeds to create a new request —
    # confirm whether an early return was intended here.
    if list(
            bsd_form.find({
                'flare_id': flare_ids,
                'request_type': level_name
            }).sort('_id', -1)):
        msg.append(f'data request for Flare {flare_ids} already exists.\n')
    try:
        # next id = highest existing _id + 1
        current_id = bsd_form.find().sort('_id', -1).limit(1)[0]['_id'] + 1
    except IndexError:
        # empty collection
        current_id = 0
    if level not in [1, 4]:
        msg.append('Not supported data level\n')
        return
    if left_margin != 0:
        # shift the start time by the (typically negative) left margin
        start_utc = stix_datetime.unix2utc(
            stix_datetime.utc2unix(start_utc) + left_margin)
    if isinstance(flare_ids, list):
        if len(flare_ids) == 1:
            flare_ids = flare_ids[0]
    duration = int(duration - left_margin + right_margin)
    # level 1 uses all detectors; level 4 masks out two of them
    detector_mask_hex = '0xFFFFFFFF' if level == 1 else '0xFFFFFCFF'
    pixel_mask_hex = '0xFFF'
    detector_mask = 0xFFFFFFFF if level == 1 else 0xFFFFFCFF
    pixel_mask = 0xFFF
    emin = 1
    eunit = 1
    num_ebins = (emax - emin + 1) / eunit
    # NOTE(review): form_bsd_request_sequence unpacks FOUR values from
    # sci_volume.estimate_volume while this call unpacks TWO — one of the
    # two call sites is likely out of date; verify against sci_volume.
    data_volume, data_volume_upper_limit = sci_volume.estimate_volume(
        start_utc, duration, tunit, num_ebins, detector_mask, pixel_mask,
        level)
    if subject is None:
        subject = f"Flare {flare_ids}" if not isinstance(
            flare_ids,
            list) else 'Flares ' + ', '.join([str(f) for f in flare_ids])
    purpose = purpose if purpose is not None else 'Solar Flare'
    form = {
        "data_volume": str(math.floor(data_volume)),
        "data_volume_upper_limit": str(math.floor(data_volume_upper_limit)),
        "execution_date": '',
        "author": author['name'],
        "email": author['email'],
        "subject": subject,
        "purpose": purpose,
        "request_type": level_name,
        "start_utc": str(start_utc),
        "start_unix": stix_datetime.utc2unix(start_utc),
        "end_unix": stix_datetime.utc2unix(start_utc) + duration,
        "duration": str(duration),
        "time_bin": str(tunit),
        "flare_id": flare_ids,
        'flare_entry_ids': flare_entry_ids,
        "detector_mask": detector_mask_hex,
        "creation_time": datetime.utcnow(),
        "creator": 'batch_creator',
        "time_tag": time_tag,
        "pixel_mask": pixel_mask_hex,
        "emin": str(emin),
        "emax": str(emax),
        'hidden': False,
        'run_id': run_ids,
        'status': 0,
        'priority': 1,
        "eunit": str(eunit),
        '_id': current_id,
        "description": f"{level_name} data request for {subject}",
        "volume": str(int(data_volume)),
        "unique_ids": []
    }
    msg.append(f'Inserting request {form["_id"]}, type: {level_name} \n')
    msg.append(str(form))
    bsd_form.insert_one(form)
    if not isinstance(flare_entry_ids, list):
        flare_entry_ids = [flare_entry_ids]
    # link the new request id back to every flare document involved
    for flare_id in flare_entry_ids:
        request_info = {'level': level, 'request_id': current_id}
        flare_collection.update_one({'_id': flare_id},
                                    {'$push': {
                                        'data_requests': request_info
                                    }})
    return form
def get_background_request_time_ranges(min_request_time_interval=24 * 3600,
                                       start_date=None):
    """Find quiet-sun time slots suitable for background data requests.

    Scans from the last background request (or *start_date*) to the
    latest quick-look data, keeps quiet-sun periods long enough for the
    required number of temperature cycles, at least
    *min_request_time_interval* apart, during which STIX had few data
    gaps and stayed in nominal mode.

    Returns:
        tuple (request_time_ranges, msg): a list of (start_unix, duration)
        pairs and a status message.
    """
    # create background data request
    # it should be called after automatic L1 and L4 requests
    db_request = mdb.get_collection('data_requests')
    db_qllc = mdb.get_collection('quick_look')
    last_bkg_request = list(
        db_request.find({
            'purpose': 'Background',
            'hidden': False
        }).sort('start_unix', -1).limit(1))
    if not last_bkg_request:
        return [], 'Can not find last background request'
    last_ql_doc = list(db_qllc.find().sort('start_unix_time', -1).limit(1))
    # now we need to request background data between the dates
    if start_date is None:
        start = last_bkg_request[0]['start_unix']
    else:
        start = sdt.utc2unix(start_date)
    end = last_ql_doc[0]['stop_unix_time']
    time_range = f'{sdt.unix2utc(start)} {sdt.unix2utc(end)}'
    msg = f'Time range containing no background requests: {time_range}'
    print(msg)
    # slots smaller than the min_duration will be excluded
    slots = mdb.find_quiet_sun_periods(
        start, end,
        min_duration=MAX_TEMP_CYCLE_PERIOD * NUM_TEMP_CYCLE_REQ)
    if not slots:
        msg += f'No quiet sun period found in time range:{time_range}'
        print(msg)
    last_request_time = start
    request_time_ranges = []
    for s in slots:
        start_unix, duration = s
        if duration < MIN_TEMP_CYCLE_PERIOD * NUM_TEMP_CYCLE_REQ:
            # slot too short to cover the required temperature cycles
            print("quiet time is too short")
            continue
        start_utc = sdt.unix2utc(start_unix)
        print("Start time", start_utc, 'Last request:',
              sdt.unix2utc(last_request_time))
        if start_unix - last_request_time < min_request_time_interval:
            # don't request: too close to the previous request
            print("ignore, less than 24 hours")
            continue
        status = sts.get_stix_status(s[0], s[1])  # get stix status
        print(status)
        # 5 minutes, data gap less than 5*64 sec
        # no change of operation modes
        if status['gaps'] < 5 and sum(status['modes'][0:4]) == 0:
            period = get_temperature_cycle_period(start_unix)
            print("temperature cycle", period)
            if period >= MIN_TEMP_CYCLE_PERIOD:
                print("this is valid: ", start_utc)
                request_time_ranges.append(
                    (start_unix, period * NUM_TEMP_CYCLE_REQ))
                last_request_time = start_unix
            else:
                print(start_utc, ' temperature cycle too short')
        else:
            print(start_utc, ' stix not in nominal mode')
    return request_time_ranges, msg