def writeHeader(self, header, overwrite=False):
    """Export every event in *header*, one output file per event.

    Parameters
    ----------
    header : databroker header
        Header whose events are written out.
    overwrite : bool, optional
        Forwarded to ``writeEvent``; controls replacing existing files.
    """
    from dataportal import get_events
    for index, event in enumerate(get_events(header)):
        target = self._getOutputFilename(header=header, event=event,
                                         index=index)
        self.writeEvent(target, event, overwrite=overwrite)
def time_search(startTime, stopTime=False, exp_day1=False, exp_day2=False):
    '''Return headers for experiments run between startTime and stopTime.

    Queries the dataBroker for runs that happened between ``startTime`` and
    ``stopTime`` on the given experiment day(s).

    Parameters
    ----------
    startTime : int, str, datetime.time or datetime.datetime
        Beginning of the period to pull data from. An integer between 0 and
        24 selects a whole hour; a string such as ``'13:17:53'`` or a
        ``datetime.time``/``datetime.datetime`` selects a precise time.
    stopTime : same forms as startTime, optional
        Latest time to pull data from. Defaults to the current time.
    exp_day1 : str or datetime.date, optional
        First day of the experiment. Defaults to today.
    exp_day2 : str or datetime.date, optional
        Last day of the experiment. Only honored when ``exp_day1`` is given;
        defaults to ``exp_day1``.

    Returns
    -------
    headers matching the time window, as returned by ``db(...)``.
    '''
    def _as_time(value):
        # Accept the documented input forms: int hour, 'HH:MM[:SS]' string,
        # datetime.time, or datetime.datetime (previously only int worked).
        if isinstance(value, datetime.datetime):
            return value.time()
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, str):
            parts = [int(p) for p in value.split(':')]
            return datetime.time(*parts)
        return datetime.time(value)

    # date part: default to today; one day given means a one-day window
    if exp_day1:
        d0 = exp_day1
        d1 = exp_day2 if exp_day2 else exp_day1
    else:
        d0 = datetime.datetime.today().date()
        d1 = d0

    # time part. Compare against False explicitly so that midnight
    # (stopTime=0, which is falsy) is still treated as a given stop time.
    t0 = _as_time(startTime)
    if stopTime is False:
        # if stopTime is not specified, set current time as stopTime
        # (truncate microseconds to match the old hour/minute/second build)
        t1 = datetime.datetime.now().time().replace(microsecond=0)
    else:
        t1 = _as_time(stopTime)

    timeHead = str(d0) + ' ' + str(t0)
    timeTail = str(d1) + ' ' + str(t1)
    header_time = db(start_time=timeHead, stop_time=timeTail)
    # NOTE: the old code also materialized get_events(header_time) into a
    # list that was never used; that wasted work has been removed.
    print('||You assign a time search in the period:\n'+str(timeHead)+' and '+str(timeTail)+'||' )
    print('||Your search gives out '+str(len(header_time))+' results||')
    return header_time
def f(name, stop_doc):
    """Replay a finished run's full document stream into ``callback``.

    Looks up the run-start, descriptors, and events belonging to the run
    referenced by *stop_doc*, then feeds them to ``callback`` in document
    order: start, descriptors, events, stop.
    """
    run_start_uid = stop_doc['run_start']
    start_doc = run_start_given_uid(run_start_uid)
    descriptor_docs = descriptors_by_start(run_start_uid)
    # For convenience, rely on the broker to fetch the Events.
    hdr = db[run_start_uid]
    event_docs = get_events(hdr)
    callback.start(start_doc)
    for descriptor in descriptor_docs:
        callback.descriptor(descriptor)
    for event in event_docs:
        callback.event(event)
    callback.stop(stop_doc)
def has_pe1signal(header):
    """Return True if header contains a non-trivial pe1_image_lightfield.

    pe1_image_lightfield must exist and it must have at least
    10 unique values.

    Parameters
    ----------
    header : databroker header
        Header to inspect.

    Returns
    -------
    bool
    """
    from dataportal import get_images, get_events
    from filestore.handlers import IntegrityError
    detname = 'pe1_image_lightfield'
    if not header.descriptors:
        return False
    try:
        e = next(get_events(header, fill=False))
    except StopIteration:
        # header has descriptors but produced no events at all
        return False
    if detname not in e.data:
        return False
    try:
        # reuse detname instead of duplicating the detector-name literal
        imgs = get_images(header, detname)
    except (IntegrityError, IOError):
        return False
    first_frame = imgs.get_frame(0)
    unique_values = set(first_frame.flatten())
    return len(unique_values) >= 10
def save_tiff(headers, dark_subtraction=True, *, max_count=None):
    '''Save images obtained from dataBroker as tiff format files.

    It returns nothing.

    Parameters
    ----------
    headers : list or single header
        Header object(s) obtained from a query to dataBroker.
    dark_subtraction : bool, optional
        When True, try to locate and subtract the associated dark image.
    max_count : int or None, keyword-only
        Stop after this many events per header; None means save all.
    '''
    F_EXTEN = '.tiff'
    e = '''Can not find a proper dark image applied to this header. Files will be saved but not no dark subtraction will be applied'''
    # prepare header list: wrap a single header so the loop below works.
    # NOTE(review): the old check `type(list(headers)[1]) == str` raised
    # IndexError for short inputs; treat any non-list/tuple as one header.
    if not isinstance(headers, (list, tuple)):
        header_list = [headers]
    else:
        header_list = headers
    for header in header_list:
        print('Saving your image(s) now....')
        # information at header level
        img_field = _identify_image_field(header)
        for ev in get_events(header):
            img = ev['data'][img_field]
            ind = ev['seq_num']
            f_name = _feature_gen(header)
            # time when triggering area detector
            event_timestamp = ev['timestamps']['pe1_image']
            # dark subtraction logic. Reset the flag per event so a later
            # failed lookup does not inherit the 'sub_' prefix from an
            # earlier success (the old code set it once and never cleared it).
            is_dark_subtracted = False
            if dark_subtraction:
                try:
                    dark_uid_appended = header.start['sc_params']['dk_field_uid']
                    try:
                        # bluesky only looks for uid it defines
                        # this should be refined later
                        dark_search = {'group': 'XPD',
                                       'xp_dark_uid': dark_uid_appended}
                        dark_header = db(**dark_search)
                        dark_img = np.array(get_images(dark_header,
                                                       img_field)).squeeze()
                    except ValueError:
                        print(e)  # protection. Should not happen
                        # fixed NameError: old code referenced an undefined
                        # name `light_imgs` here; zero image of same shape
                        dark_img = np.zeros_like(img)
                    # subtract without mutating the broker's event data
                    img = img - dark_img
                    # label it only if it is successfully done
                    is_dark_subtracted = True
                except KeyError:
                    # backward support for scans with auto_dark turned off
                    print(e)
            # complete file name
            f_name = f_name + _timestampstr(event_timestamp)
            if is_dark_subtracted:
                f_name = 'sub_' + f_name
            if 'temperature' in ev['data']:
                f_name = f_name + '_' + str(ev['data']['temperature']) + 'K'
            # index is still needed as we don't want timestamp in file name
            # down to seconds
            combind_f_name = '{}_{}{}'.format(f_name, ind, F_EXTEN)
            w_name = os.path.join(W_DIR, combind_f_name)
            print(w_name)
            # NOTE(review): the actual tiff write is still disabled below;
            # nothing is written to disk until it is re-enabled.
            #tif.imsave(w_name, img)
            #if os.path.isfile(w_name):
                #print('image "%s" has been saved at "%s"' % (combind_f_name, W_DIR))
            #else:
                #print('Sorry, something went wrong with your tif saving')
                #return
            # break the loop if max_count reached or already collected all
            if max_count is not None and ind >= max_count:
                break
    print('||********Saving process FINISHED********||')