import os
import datetime

import numpy as np

# The names used below but not imported here (db, get_events, get_images,
# W_DIR, get_keys, build_keychain_list and the underscore helpers) are assumed
# to be provided elsewhere by the acquisition environment (databroker/xpdAcq).


def search(desired_value, *args, **kwargs):
    '''Return all header(s) that satisfy your search criteria.

    This function operates in two modes:
    1) When desired_value and args are both given, it searches for all headers
        in which a key matching args has the value desired_value. args can be an
        incomplete key name; in that case this function yields multiple searches.

    example:
    desired_value = 'TiO2'
    search(desired_value, 'sa') will return all headers that have a key starting with 'sa'
    whose corresponding value in the metadata dictionary is 'TiO2'. Namely, it yields
    searches on headers with sample = 'TiO2', sadness = 'TiO2', ...

    2) When desired_value is not given, it implies you already know your search criteria
        and are ready to type them explicitly, possibly with additional constraints.

    example:
    desired_value = 'TiO2'
    search(False, **{'sample_name': desired_value, 'additional_field': 'additional_value', ...}) will return
    headers that contain exactly the key-value pairs {'sample_name': desired_value, 'additional_field': 'additional_value', ...}

    The general strategy is to use the first mode to figure out what your desired key is,
    then use the second mode to narrow down your search.

    arguments:
    desired_value - str - desired value you are looking for
    args - str - key name you want to search for. It can be fuzzy or complete. If it is fuzzy, all possibilities will be listed.
    kwargs - dict - a dictionary that contains the exact key-value pairs you want to search for

    '''
    if desired_value and args:
        possible_keys = get_keys(args)
        keychain_list = build_keychain_list(possible_keys, verbose=0)
        search_header_list = []
        for i in range(len(keychain_list)):
            dummy_search_dict = {}
            dummy_search_dict[keychain_list[i]] = desired_value
            dummy_search_dict['group'] = 'XPD' # create an anchor as mongoDB and_search needs at least 2 key-value pairs
            search_header = db(**dummy_search_dict)
            search_header_list.append(search_header)
            print('Your search #%i "%s=%s" yields %i headers' % (i,
                keychain_list[i], desired_value, len(search_header)))
        return search_header_list # return only after looping over every candidate key
    elif not desired_value and kwargs:
        if len(kwargs)>1:
            search_header = db(**kwargs)
        elif len(kwargs) == 1:
            kwargs['group'] = 'XPD'
            search_header = db(**kwargs)
        else:
            print('You gave empty search criteria. Please try again')
            return
        return search_header
    else:
        print('Sorry, your search is somehow unrecognizable. Please make sure you are putting values in the right fields')
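
# Usage sketch (hypothetical metadata values; assumes a populated databroker
# instance `db` is configured): first use the fuzzy mode to discover the exact
# key name, then repeat the search with explicit key-value pairs.
#
#   candidate_headers = search('TiO2', 'sa')        # mode 1: fuzzy key starting with 'sa'
#   headers = search(False, sample_name='TiO2')     # mode 2: exact key-value pair
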
def time_search(startTime,stopTime=False,exp_day1=False,exp_day2=False):
    '''Return the headers for scans run in the interval startTime to stopTime

    This function returns a set of headers from the dataBroker for scans that
    happened between startTime and stopTime on the given experiment day(s).

    arguments:
    startTime - datetime.time object or string or integer - time at the beginning of the
                period that you want to pull data from. The format can be an integer
                between 0 and 24 to set it to a whole hour, or a datetime.time object to do
                it more precisely, e.g., datetime.time(13, 17, 53) for 53 seconds after
                1:17 pm, or a string of the same form, e.g., '13:17:53' in the example above
    stopTime -  datetime.time object or string or integer - as startTime, but the latest time
                that you want to pull data from. Defaults to the current time.
    exp_day1, exp_day2 - str or datetime.date object - the first and last day of the
                experiment. exp_day2 defaults to exp_day1; if neither is given, today's date is used.
    '''
    # date part
    if exp_day1:
        if exp_day2:
            d0 = exp_day1
            d1 = exp_day2
        else:
            d0 = exp_day1
            d1 = d0
    else:
        d0 = datetime.datetime.today().date()
        d1 = d0

    # time part
    if stopTime:
        t0 = datetime.time(startTime)
        t1 = datetime.time(stopTime)
    else:
        # if stopTime is not specified, set the current time as stopTime
        now = datetime.datetime.now()
        t0 = datetime.time(startTime)
        t1 = datetime.time(now.hour, now.minute, now.second)

    timeHead = str(d0)+' '+str(t0)
    timeTail = str(d1)+' '+str(t1)

    header_time = db(start_time=timeHead,
                     stop_time=timeTail)

    event_time = get_events(header_time, fill=False)
    event_time = list(event_time)

    print('||You requested a time search over the period:\n' + str(timeHead) + ' to ' + str(timeTail) + '||')
    print('||Your search returned ' + str(len(header_time)) + ' results||')

    return header_time
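
# Usage sketch (hypothetical dates; note the current implementation only wraps
# integer hours through datetime.time()): pull headers for scans collected
# between 9:00 and 17:00.
#
#   headers = time_search(9, 17)
#   headers = time_search(9, 17, exp_day1='2016-05-01', exp_day2='2016-05-02')
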
def save_tiff(headers, dark_subtraction=True, *, max_count=None):
    '''Save images obtained from dataBroker as tiff format files. It returns nothing.

    arguments:
        headers - list - a list of header objects obtained from a query to dataBroker
        dark_subtraction - bool - whether to subtract the corresponding dark image before saving. Default is True.
        max_count - int or None - maximum number of events saved per header. Default is None, which saves all of them.
    '''
    F_EXTEN = '.tiff'
    e = '''Cannot find a proper dark image associated with this header.
        Files will be saved but no dark subtraction will be applied'''
    is_dark_subtracted = False # assume dark subtraction has not been done yet
    
    # prepare header
    # listing a single header yields its (string) keys, while listing a list of headers yields header objects
    if isinstance(list(headers)[1], str):
        header_list = list()
        header_list.append(headers)
    else:
        header_list = headers

    for header in header_list:
        print('Saving your image(s) now....')
        # information at header level
        img_field = _identify_image_field(header)
        for ev in get_events(header):
            img = ev['data'][img_field]
            ind = ev['seq_num']
            f_name = _feature_gen(header)
            event_timestamp = ev['timestamps'][img_field] # time when the area detector was triggered
            
            # dark subtraction logic
            if dark_subtraction:
                try:
                    dark_uid_appended = header.start['sc_params']['dk_field_uid']
                    try:
                        # bluesky only looks for uids it defines
                        dark_search = {'group': 'XPD', 'xp_dark_uid': dark_uid_appended} # this should be refined later
                        dark_header = db(**dark_search)
                        dark_img = np.array(get_images(dark_header, img_field)).squeeze()
                    except ValueError:
                        print(e) # protection. Should not happen
                        dark_img = np.zeros_like(img)
                    img -= dark_img
                    is_dark_subtracted = True # label it only if dark subtraction was successfully done
                except KeyError:
                    print(e) # backward support for scans with auto_dark turned off
            # complete file name
            f_name = f_name + _timestampstr(event_timestamp)
            if is_dark_subtracted:
                f_name = 'sub_' + f_name
            if 'temperature' in ev['data']:
                f_name = f_name + '_' + str(ev['data']['temperature']) + 'K'
            # index is still needed as the timestamp in the file name is not kept down to seconds
            combined_f_name = '{}_{}{}'.format(f_name, ind, F_EXTEN)
            w_name = os.path.join(W_DIR, combined_f_name)
            print(w_name)
            #tif.imsave(w_name, img)
            #if os.path.isfile(w_name):
                #print('image "%s" has been saved at "%s"' % (combined_f_name, W_DIR))
            #else:
                #print('Sorry, something went wrong with your tif saving')
                #return
            if max_count is not None and ind >= max_count:
                break # stop once max_count images have been saved for this header
    print('||********Saving process FINISHED********||')
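
# Usage sketch (hypothetical sample name; assumes W_DIR and the databroker setup
# above are configured): fetch headers with one of the search helpers, then save
# the (dark-subtracted) images as tiff files.
#
#   headers = search(False, sample_name='TiO2')
#   save_tiff(headers, dark_subtraction=True, max_count=5)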