def plot_reflectivity(db_si, db_rh):
    """
    by LW 10/04/2016
    Plot measured reflectivity R_Si / R_Rh against the theoretical curve for a 0.18 deg incident angle.
    Calling sequence: plot_reflectivity(db_si, db_rh)
    db_si: databroker header for the reflectivity scan (E_scan) from the Si stripe; db_rh: same for the Rh stripe.
    Notes: 1) assumes E_scan was used to obtain the data
    2) both scans must share the same scan range and number of data points (no interpolation to a common x-grid)
    3) use the Ti foil in the BPM for scans on the elm detector
    """
    si_dat = get_table(db_si)
    rh_dat = get_table(db_rh)
    en_r = xf.get_EBragg('Si111cryo', -si_dat.dcm_b)
    plt.figure(19)
    plt.semilogy(en_r,
                 si_dat.elm_sum_all / rh_dat.elm_sum_all,
                 label='measured')
    calc_rh = np.loadtxt("/home/xf11id/Downloads/R_Rh_0p180.txt")
    r_eng = calc_rh[:, 0] / 1e3
    rsi_0p18 = np.loadtxt("/home/xf11id/Downloads/R_Si_0p180.txt")[:, 1]
    rrh_0p18 = calc_rh[:, 1]
    plt.semilogy(r_eng, rsi_0p18 / rrh_0p18, 'r--', label="calc 0.18 deg")
    plt.xlabel('E [keV]')
    plt.ylabel('R_Si / R_Rh')
    plt.grid()
    plt.legend()
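# A minimal usage sketch, assuming db is available and two matching E_scans
# were just taken (the header choices here are hypothetical):
db_si = db[-2]   # E_scan on the Si stripe
db_rh = db[-1]   # E_scan on the Rh stripe
plot_reflectivity(db_si, db_rh)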
Example #2
def ss_csv(f_nm, sc_num, motor, det):
    '''save_scan_csv - usage:
           f_nm: string with extension '***.csv'
           sc_num: scan number
           motor: motor name
           det: detector name
       Saves the given scan in CSV format, with the motor as the first column.
    '''

    from databroker import DataBroker as db, get_table, get_images, get_events
    hdr = db[sc_num]
    if motor == 'time':
        df = get_table(hdr, [det])
    else:
        df = get_table(hdr, [motor, det])
        del df['time']

    f_path = "/direct/XF21ID1/csv_files/" + f_nm

    cols = df.columns.tolist()
    m = cols.index(motor)
    cols.pop(m)
    cols = [motor] + cols
    df = df[cols]

    df.to_csv(f_path, index=False)
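# Usage sketches with hypothetical scan number, motor and detector names;
# the output lands in /direct/XF21ID1/csv_files/:
ss_csv('scan_42.csv', 42, 'dcm_b', 'elm_sum_all')
# pass motor='time' to keep the time column for a time series:
ss_csv('scan_42_vs_time.csv', 42, 'time', 'elm_sum_all')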
Example #3
def get_columns(header):
    try:
        columns = get_table(header).columns
        col_list = sorted(list(columns))
        return col_list
    except Exception:
        return []
Example #4
def wrap_plotit(change):
    header = select_scan_id_widget.value
    table = get_table(header)

    det_name = select_I_widget.value

    def plot_mesh(header,
                  table,
                  det_name,
                  vmin=np.nanpercentile(table[det_name], 5),
                  vmax=np.nanpercentile(table[det_name], 95)):
        plt.figure()
        motor_name_0, motor_name_1 = header.start.motors
        shape = header.start.shape
        I = table[det_name].values.reshape(shape)
        motor_0 = table[motor_name_0].values.reshape(shape)
        motor_1 = table[motor_name_1].values.reshape(shape)

        plt.pcolormesh(motor_0, motor_1, I, vmin=vmin, vmax=vmax)
        plt.title("Scan_id {}".format(header.start.scan_id))
        plt.xlabel(motor_name_0)
        plt.ylabel(motor_name_1)
        plt.colorbar(label=det_name)

    interact(plot_mesh,
             header=fixed(header),
             table=fixed(table),
             det_name=fixed(det_name),
             vmin=(table[det_name].min(), table[det_name].max()),
             vmax=(table[det_name].min(), table[det_name].max()))
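# The callbacks above rely on widgets defined elsewhere in the notebook. A
# minimal sketch of that wiring (the widget contents are hypothetical; only
# the names match what wrap_plotit expects):
from ipywidgets import Button, Dropdown

select_scan_id_widget = Dropdown(options=[('scan 42', db[42])])  # label -> header
select_I_widget = Dropdown(options=['elm_sum_all'])              # intensity field
plot_button = Button(description='plot')
plot_button.on_click(wrap_plotit)  # on_click passes the button as `change`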
Example #5
def shallow_header_verify(hdf_path, header):
    table = get_table(header)
    with h5py.File(hdf_path) as f:
        # make sure that the header is actually in the file that we think it is
        # supposed to be in
        assert header.start.uid in f
        assert dict(header.start) == eval(f[header.start.uid].attrs['start'])
        assert dict(header.stop) == eval(f[header.start.uid].attrs['stop'])
        # make sure the descriptors are all in the hdf output file
        for descriptor in header.descriptors:
            descriptor_path = '%s/%s' % (header.start.uid, descriptor.uid)
            assert descriptor_path in f
            # make sure all keys are in each descriptor
            for key in descriptor.data_keys:
                data_path = "%s/data/%s" % (descriptor_path, key)
                # make sure that the data path is in the file
                assert data_path in f
                # make sure the data is equivalent to what comes out of the
                # databroker
                hdf_data = np.asarray(f[data_path])
                broker_data = table[key].dropna().values
                assert all(hdf_data == broker_data)
                # make sure the data is sorted in chronological order
                timestamps_path = "%s/timestamps/%s" % (descriptor_path, key)
                timestamps = np.asarray(f[timestamps_path])
                assert all(np.diff(timestamps) > 0)
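# A usage sketch (the path is hypothetical; assumes a databroker instance db
# and an HDF5 file previously exported from the same header by a matching
# exporter):
shallow_header_verify('/tmp/last_run.h5', db[-1])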
Example #6
def get_data(scan_id, field='ivu_gap', intensity_field='elm_sum_all', det=None, debug=False):
    """Get data from the scan stored in the table.
from Maksim
    :param scan_id: scan id from bluesky.
    :param field: visualize the intensity vs. this field.
    :param intensity_field: the name of the intensity field.
    :param det: the name of the detector.
    :param debug: a debug flag.
    :return: a tuple of X, Y and timestamp values.
    """
    scan, t = get_scan(scan_id)
    if det:
        imgs = get_images(scan, det)
        im = imgs[-1]
        if debug:
            print(im)

    table = get_table(scan)
    fields = get_fields(scan)

    if debug:
        print(table)
        print(fields)
    x = table[field]
    y = table[intensity_field]

    return x, y, t
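# A usage sketch with the defaults documented above (the scan id is
# hypothetical; get_scan, get_fields and plt come from the surrounding module):
x, y, t = get_data(12345, field='ivu_gap', intensity_field='elm_sum_all')
plt.plot(x, y, 'o-')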
Example #7
def export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'],
                       path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' ):
    '''export uid data to a txt file
    uid: unique scan id
    x: the x-col
    y: the y-cols
    path: save path
    Example:
        data = export_scan_scalar( uid, x='dcm_b', y= ['xray_eye1_stats1_total'],
                       path='/XF11ID/analysis/2016_3/commissioning/Results/exported/' )
    A plot of the data:
        data.plot(x='dcm_b', y='xray_eye1_stats1_total', marker='o', ls='-', color='r')

    '''
    from databroker import DataBroker as db, get_images, get_table, get_events, get_fields 
    from chxanalys.chx_generic_functions import trans_data_to_pd  # needed below
    import numpy as np
    hdr = db[uid]
    print( get_fields( hdr ) )
    data = get_table( db[uid] )
    xp = data[x]
    datap = np.zeros(  [len(xp), len(y)+1])
    datap[:,0] = xp
    for i, yi in enumerate(y):
        datap[:,i+1] = data[yi]
        
    datap = trans_data_to_pd( datap, label=[x] + [yi for yi in y])   
    fp = path + 'uid=%s.csv'%uid
    datap.to_csv( fp )
    print( 'The data was saved in %s'%fp)
    return datap
Example #8
def plot_scan( sid = -1, x=None, y=None ):
    '''plot scan_ids,
        Options: 
        sid: the scan id, a number or a list
        x: the x-axis, a string
        y: the y-axis, a string
    '''
    from databroker import DataBroker as db,  get_table #get_events, get_images,
    import matplotlib.pyplot as plt
    
    if not isinstance(sid, list):
        sid = [sid]
    if x is None:
        x='time'
    if y is None:
        y='time'
    fig,ax=plt.subplots()    
    for s in sid:
        dat = get_table(  db[s] )
        if x not in dat.keys():
            print('Wrong x input!')
            print('The available X columns are: %s' % dat.keys())
            break
        if y not in dat.keys():
            print('Wrong y input!')
            print('The available Y columns are: %s' % dat.keys())
            break
        
        dat.plot( x=x,y=y,ax=ax, label='sid: %s'%s )
        ax.set_ylabel( y )
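# Usage sketches (scan ids and column names are hypothetical):
plot_scan()                                            # last scan, time vs. time
plot_scan(sid=[100, 101], x='dcm_b', y='elm_sum_all')  # overlay two scans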
Example #10
def fit_gisaxs_height_scan_profile( uid='-1', x0=0, k=2, A=1, base=0, 
                             motor = 'diff_yh', det =  'eiger4m_single_stats1_total' ):

    '''Fit a GiSAXS height scan (diff.yh scan) with an error function.

       The scan data is first normalized by a simple normalization:
            (y - y.min()) / (y.max() - y.min())
       It is then fit with an error-function profile defined as base - A * erf(k*(x-x0)),
           where erf(z) = 2/sqrt(pi) * integral(exp(-t**2), t=0..z)
           (imported via: from scipy.special import erf)

       Parameters:
           x0: the fit center, by default 0
           k: the stretch factor, by default 2
           A: amplitude of the scan, by default 1
           base: baseline of the scan, by default 0

           uid: the uid of the scan, by default -1, i.e., the last scan
           motor: the scan motor, by default 'diff_yh'
           det: detector, by default 'eiger4m_single_stats1_total'
      Returns:
           a plot of the scan and the fitted curve
           the fitted x0
    '''
    
    from lmfit import  Model
    from lmfit import minimize, Parameters, Parameter, report_fit
    from scipy.special import erf
    def norm_y(y ):
        return (y - y.min()) / (y.max() - y.min())
    def err_func(x, x0, k=2, A=1,  base=0 ):        
        return base - A * erf(k*(x-x0))
    
    mod = Model(  err_func )
    pars  = mod.make_params( x0=x0, k=k,  A = A, base = base )    
    if uid == '-1':
        uid = -1
    data = get_table(db[uid], fields=[motor, det])
    x = np.array(data[motor])
    y = np.array(data[det])
    ym = norm_y(y)    
    result = mod.fit(ym, pars, x = x )
    
    fig, ax = plt.subplots()
    plot1D( x=x, y = ym, m='o', c='k', ls ='', legend='scan',ax=ax,)
    plot1D( x=x, y = result.best_fit,m='', c='r', ls='-',  legend='fit-x0=%s'%result.best_values['x0'],ax=ax,)
    
    return result.best_values['x0']
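# A usage sketch with the defaults documented above; returns the fitted
# center x0 of the erf profile for the last scan:
x0_fit = fit_gisaxs_height_scan_profile(uid=-1, motor='diff_yh',
                                        det='eiger4m_single_stats1_total')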
Example #11
def wrap_saveit(change):
    header = select_scan_id_widget.value
    table = get_table(header)
    x = table[select_x_widget.value]
    y = table[select_y_widget.value]
    mon = table[select_mon_widget.value]
    data = pd.concat([x, y, mon], axis=1)
    data.to_csv(filename_widget.value)
Example #12
def test_basic_usage():
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='nedbrainard', beamline_id='example',
                         uid=str(uuid.uuid4()))
    header_1 = db[-1]

    header_ned = db(owner='nedbrainard')
    header_ned = db.find_headers(owner='nedbrainard')  # deprecated API
    header_null = db(owner='this owner does not exist')
    # smoke test
    db.fetch_events(header_1)
    db.fetch_events(header_ned)
    db.fetch_events(header_null)
    list(get_events(header_1))
    list(get_events(header_null))
    get_table(header_1)
    get_table(header_ned)
    get_table(header_null)

    # get events for multiple headers
    list(get_events(db[-2:]))

    # test time shift issue GH9
    table = get_table(db[105])
    assert table.notnull().all().all()
Example #13
def sl_csv(f_nm):
    '''save_last_csv - usage: sl_csv(f_nm)
           f_nm: string with extension '***.csv'
       Saves the last run in CSV format.
    '''

    from databroker import DataBroker as db, get_table, get_images, get_events
    hdr = db[-1]
    df = get_table(hdr)
    df.to_csv(f_nm, index=False)
Example #14
def test_handler_options(image_example_uid):
    h = db[image_example_uid]
    list(get_events(h))
    list(get_table(h))
    list(get_images(h, "img"))
    res = list(get_events(h, fields=["img"], fill=True, handler_registry={"npy": DummyHandler}))
    res = [ev for ev in res if "img" in ev["data"]]
    res[0]["data"]["img"] == "dummy"
    res = list(get_events(h, fields=["img"], fill=True, handler_overrides={"image": DummyHandler}))
    res = [ev for ev in res if "img" in ev["data"]]
    res[0]["data"]["img"] == "dummy"
    res = get_table(h, ["img"], fill=True, handler_registry={"npy": DummyHandler})
    assert res["img"].iloc[0] == "dummy"
    res = get_table(h, ["img"], fill=True, handler_overrides={"img": DummyHandler})
    assert res["img"].iloc[0] == "dummy"
    res = get_images(h, "img", handler_registry={"npy": DummyHandler})
    assert res[0] == "dummy"
    res = get_images(h, "img", handler_override=DummyHandler)
    assert res[0] == "dummy"
Example #15
def run_show(uid):
    h = db[uid]
    fields = []
    for descriptor in h['descriptors']:
        for field in descriptor['data_keys']:
            fields.append(field)

    table = get_table(h, fill=True)
    bokeh_kw = plot_table_by_time(table)
    return render_template('run_show.html', uid=uid, fields=fields,
                           **bokeh_kw)
Example #17
def exp_1D_csv(f_nm, sc_num, motor, det):
    '''save_scan_csv - usage:
           f_nm: string with extension '***.csv'
           sc_num: scan number
           motor: motor name
           det: detector name
       Saves the given scan in CSV format.
    '''

    hdr = db[sc_num]
    df = get_table(hdr, [det, motor])
    del df['time']
    df.to_csv(f_nm, index=False)
Example #18
def ss_csv(f_nm, sc_num, motor, det):
    '''save_scan_csv - usage:
           f_nm: string with extension '***.csv'
           sc_num: scan number
           motor: motor name
           det: detector name
       Saves the given scan in CSV format.
    '''

    from databroker import DataBroker as db, get_table, get_images, get_events
    hdr = db[sc_num]
    df = get_table(hdr, [det, motor])
    del df['time']
    df.to_csv(f_nm, index=False)
Example #19
def wrap_plotit(change):
    header = select_scan_id_widget.value
    table = get_table(header)
    x = table[select_x_widget.value].values
    if use_mon_widget.value:
        y = table[select_y_widget.value].values / table[
            select_mon_widget.value].values
    else:
        y = table[select_y_widget.value].values

    label = header.start.scan_id
    plt.plot(x, y, label=label)
    plt.xlabel(select_x_widget.value)
    plt.ylabel(select_y_widget.value)
    plt.legend()

    start_values = pd.Series(
        [table[col].values[0] for col in table.columns],  # table[col] also works for non-identifier column names
        index=table.columns)
    starting_values_display.value = start_values.sort_index().to_string()
Example #20
def test_legacy_config_warnings(RE):
    name = databroker.databroker.SPECIAL_NAME
    assert 'test' in name
    path = os.path.join(os.path.expanduser('~'), '.config', 'databroker',
                        name + '.yml')
    ensure_path_exists(os.path.dirname(path))
    with open(path, 'w') as f:
        yaml.dump(EXAMPLE, f)

    imp.reload(databroker.databroker)
    imp.reload(databroker)
    from databroker import db, DataBroker, get_table, get_events

    RE.subscribe(db.insert)
    uid, = RE(count([det]))
    with pytest.warns(UserWarning):
        assert len(get_table(db[uid]))
    with pytest.warns(UserWarning):
        assert list(get_events(db[uid]))

    # Clean up
    os.remove(path)
Example #21
def test_basic_usage():
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='nedbrainard', beamline_id='example',
                         uid=str(uuid.uuid4()))
    header_1 = db[-1]

    header_ned = db(owner='nedbrainard')
    header_ned = db.find_headers(owner='nedbrainard')  # deprecated API
    header_null = db(owner='this owner does not exist')
    # smoke test
    db.fetch_events(header_1)
    db.fetch_events(header_ned)
    db.fetch_events(header_null)
    get_events(header_1)
    get_events(header_ned)
    get_events(header_null)
    get_table(header_1)
    get_table(header_ned)
    get_table(header_null)

    # get events for multiple headers
    get_events([header_1, header_ned])
Example #22
def test_db(sid):
    from databroker import DataBroker as db, get_fields, get_table
    h = db[sid]
    scan = get_table(h)
    fld = get_fields(h)
    print('the scan fields include %s' % fld)
Example #23
def get_ID_calibration(gapstart,gapstop,gapstep=.2,gapoff=0):
    """
    by LW 04/20/2015

    function to automatically take a ID calibration curve_fit
    calling sequence: get_ID_calibration(gapstart,gapstop,gapstep=.2,gapoff=0)
	gapstart: minimum gap used in calibration (if <5.2, value will be set to 5.2)

	gapstop: maximum gap used in calibration
	gapstep: size of steps between two gap points
	gapoff: offset applied to calculation gap vs. energy from xfuncs.get_Es(gap-gapoff)

	thermal management of Bragg motor is automatic, waiting for cooling <80C between Bragg scans
    writes outputfile with fitted value for the center of the Bragg scan to:  '/home/xf11id/Repos/chxtools/chxtools/X-ray_database/
	changes 03/18/2016: made compatible with python V3 and latest versio of bluesky (working on it!!!)

    """
    import numpy as np
    #import xfuncs as xf
    #from dataportal import DataBroker as db, StepScan as ss, DataMuxer as dm
    import time
    from epics import caput, caget
    from matplotlib import pyplot as plt
    from scipy.optimize import curve_fit
    gaps = np.arange(gapstart, gapstop, gapstep) - gapoff   # not sure this should be '+' or '-' ...
    print('ID calibration will contain the following gaps [mm]: ',gaps)
    xtal_map = {1: 'Si111cryo', 2: 'Si220cryo'}
    pos_sts_pv = 'XF:11IDA-OP{Mono:DCM-Ax:X}Pos-Sts'
    try:
        xtal = xtal_map[caget(pos_sts_pv)]
    except KeyError:
        raise CHX_utilities_Exception('error: trying to do ID gap calibration with no crystal in the beam')
    print('using', xtal, 'for ID gap calibration')
    # create file for writing calibration data:
    fn='id_CHX_IVU20_'+str(time.strftime("%m"))+str(time.strftime("%d"))+str(time.strftime("%Y"))+'.dat'
    #fpath='/tmp/'
    fpath='/home/xf11id/Repos/chxtools/chxtools/X-ray_database/'
    try:
        outFile = open(fpath+fn, 'w')
        outFile.write('% data from measurements '+str(time.strftime("%D"))+'\n')
        outFile.write('% K column is a placeholder! \n')
        outFile.write('% ID gap [mm]     K      E_1 [keV] \n')
        outFile.close()
        print('successfully created outputfile: ',fpath+fn)
    except Exception:
        raise CHX_utilities_Exception('error: could not create output file')
    
    ### do the scanning and data fitting, file writing,....
    t_adjust=0
    center=[]
    E1=[]
    realgap=[]
    detselect(xray_eye1)
    print(gaps)
    MIN_GAP = 5.2
    for i in gaps:
        if i >= MIN_GAP: 
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i+gapoff,5)[1])[0]
        else:
            i = MIN_GAP 
            B_guess=-1.0*xf.get_Bragg(xtal,xf.get_Es(i,5)[1])[0]
        if i > 8 and t_adjust == 0:     # adjust acquisition time once while opening the gap (could write something more intelligent in the long run...)
           exptime=caget('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime')
           caput('XF:11IDA-BI{Bpm:1-Cam:1}cam1:AcquireTime',2*exptime)
           t_adjust = 1
        print('initial guess: Bragg= ',B_guess,' deg.   ID gap = ',i,' mm')
        es = xf.get_Es(i, 5)[1]
        mirror_stripe_pos = round(caget('XF:11IDA-OP{Mir:HDM-Ax:Y}Mtr.VAL'),1)
        SI_STRIPE = -7.5 
        RH_STRIPE = 7.5
        if es < 9.5:
            stripe = SI_STRIPE
        elif es >= 9.5:
            stripe = RH_STRIPE
        mov(hdm.y, stripe)
        mov(foil_y, 0)  # Put YAG in beam.
        print('moving DCM Bragg angle to:', B_guess ,'deg and ID gap to', i, 'mm')
        #RE(bp.abs_set(dcm.b, B_guess))
        mov(dcm.b, B_guess)
        #RE(bp.abs_set(ivu_gap,i))
        mov(ivu_gap,i)
        print('hurray, made it up to here!')
        print('about to collect data')
        RE(ascan(dcm.b, float(B_guess-.4), float(B_guess+.4), 60))
        header = db[-1]  # retrieve the data (the first data point is often "wrong", so don't use it)
        data = get_table(header)
        B = data.dcm_b[2:]
        intdat = data.xray_eye1_stats1_total[2:]
        B=np.array(B)
        intdat=np.array(intdat)
        A=np.max(intdat)          # initial parameter guess and fitting
        xc=B[np.argmax(intdat)]
        w=.2
        yo=np.mean(intdat)
        p0=[yo,A,xc,w]
        print('initial guess for fitting: ',p0)
        pss = 0
        try:
            coeff,var_matrix = curve_fit(gauss,B,intdat,p0=p0)        
            #center.append(coeff)
            #E1.append(xf.get_EBragg(xtal,-coeff)/5.0)
            realgap.append(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))
            # append the data file with gap, placeholder K=1.0, and E_1 = xf.get_EBragg(xtal, -Bvalue)/5.0:
            print('passed the Gaussian trial fit, will use ps now to write data')
            ps()  #this should always work
            Bvalue = ps.cen
            E1.append(xf.get_EBragg(xtal,-Bvalue)/5.0)
            center.append(Bvalue) 
            with open(fpath+fn, "a") as myfile:
                myfile.write(str(caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'))+'    1.0 '+str(float(xf.get_EBragg(xtal,-Bvalue)/5.0))+'\n')
            print('added data point: ',caget('SR:C11-ID:G1{IVU20:1-LEnc}Gap'),' ',1.0,'     ',str(float(xf.get_EBragg(xtal,-Bvalue)/5.0)))
        except Exception:
            print('could not evaluate data point for ID gap = ', i, ' mm...data point skipped!')
        while caget('XF:11IDA-OP{Mono:DCM-Ax:Bragg}T-I') > 80:
            time.sleep(30)
            print('DCM Bragg axis too hot (>80C)...waiting...')
    plt.close(234)
    plt.figure(234)
    plt.plot(E1,realgap,'ro-')
    plt.xlabel('E_1 [keV]')
    plt.ylabel('ID gap [mm]')
    plt.title('ID gap calibration in file: '+fpath+fn,size=12)
    plt.grid()
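# The curve_fit call above relies on a Gaussian model function `gauss` that is
# not defined in this snippet. A minimal sketch consistent with the initial
# guess p0 = [yo, A, xc, w] used above (hypothetical helper):
import numpy as np

def gauss(x, yo, A, xc, w):
    # Gaussian on a constant offset: yo + A * exp(-(x - xc)**2 / (2 * w**2))
    return yo + A * np.exp(-(x - xc)**2 / (2 * w**2))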
Example #24
def E_calibration(file,Edge='Cu',xtal='Si111cryo',B_off=0):
    """
    by LW 3/25/2015
    Function to read an energy scan file and determine the offset correction.
    Calling sequence: E_calibration(file, Edge='Cu', xtal='Si111cryo', B_off=0)
    file: path/filename of experimental data; 'ia' opens an interactive dialog; file can also be a databroker object, e.g. file=db[-1] to process data from the last scan
    Edge: element used for calibration
    xtal: monochromator crystal under calibration
    B_off (optional): apply offset to Bragg angle data
    Currently there is no check on input parameters!
    """
    # read the data file 
    import csv
    import numpy as np
    import matplotlib.pyplot as plt
    #import xfuncs as xf
    #import Tkinter, tkFileDialog
        
    databroker_object = 0   # initialize so the branch check below cannot raise a NameError
    if file == 'ia':          # open file dialog
        print('this would open a file input dialog IF Tkinter was available in the $%^& python environment as it used to')
        #root = Tkinter.Tk()
        #root.withdraw()
        #file_path = tkFileDialog.askopenfilename()
        description = file_path
    elif isinstance(file, str) and file != 'ia':
        file_path = file
        description = file_path
    #elif isinstance(file,dict) and 'start' in file.keys():	# some genius decided that db[-1] is no longer a dictionary....
    elif 'start' in file.keys():
        databroker_object = 1
        description = 'scan # ', file.start['scan_id'], ' uid: ', file.start['uid'][:10]
    plt.close("all")
    Edge_data={'Cu': 8.979, 'Ti': 4.966}
    if databroker_object !=1:
       Bragg=[]
       Gap=[]
       Intensity=[]
       with open(file_path, 'r') as csvfile:
           filereader = csv.reader(csvfile, delimiter=' ')
           next(filereader)   # skip header lines
           next(filereader)
           next(filereader)
           for row in filereader:              # read data
               try: Bragg.append(float(row[2]))
               except Exception: print('could not convert: ', row[2])
               try: Gap.append(float(row[5]))
               except Exception: print('could not convert: ', row[5])
               try: Intensity.append(float(row[7]))
               except Exception: print('could not convert: ', row[7])
    elif databroker_object==1:
       data = get_table(file)
       Bragg = data.dcm_b[1:]     # retrieve the data (the first data point is often "wrong", so don't use it)
       #Gap = data['SR:C11-ID:G1{IVU20:1_readback'][1:]  # name is messed up in databroker -> currently don't use gap
       Intensity = data.elm_sum_all[1:]   # need to find the signal from the electrometer...elm is commented out in detectors at the moment...???


    B=np.array(Bragg)*-1.0+B_off
    #G=np.array(Gap[0:len(B)])   # not currently used, but converted for future use
    Int=np.array(Intensity[0:len(B)])
        
    # normalize and remove background:
    Int=Int-min(Int)
    Int=Int/max(Int)

    plt.figure(1)
    plt.plot(B,Int,'ko-',label='experimental data')
    plt.plot([xf.get_Bragg(xtal,Edge_data[Edge])[0],xf.get_Bragg(xtal,Edge_data[Edge])[0]],[0,1],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel(r'$\theta_B$ [deg.]')
    plt.ylabel('intensity')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
        
    plt.figure(2)
    Eexp=xf.get_EBragg(xtal,B)
    plt.plot(Eexp,Int,'ko-',label='experimental data')
    plt.plot([Edge_data[Edge],Edge_data[Edge]],[0,1],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel('E [keV.]')
    plt.ylabel('intensity')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
        
    # calculate derivative and analyze:
    Bragg_Edge=xf.get_Bragg(xtal,Edge_data[Edge])[0]
    plt.figure(3)
    diffdat=np.diff(Int)
    plt.plot(B[0:len(diffdat)],diffdat,'ko-',label='diff experimental data')
    plt.plot([Bragg_Edge,Bragg_Edge],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel(r'$\theta_B$ [deg.]')
    plt.ylabel('diff(int)')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
        
    plt.figure(4)
    plt.plot(xf.get_EBragg(xtal,B[0:len(diffdat)]),diffdat,'ko-',label='diff experimental data')
    plt.plot([Edge_data[Edge],Edge_data[Edge]],[min(diffdat),max(diffdat)],'r--',label='Edge for: '+Edge)
    plt.legend(loc='best')
    plt.xlabel('E [keV.]')
    plt.ylabel('diff(int)')
    plt.title(['Energy Calibration using: ',description])
    plt.grid()
        
    edge_index=np.argmax(diffdat)
    B_edge=xf.get_Bragg(xtal,Edge_data[Edge])[0]
        
    print('') 
    print('Energy calibration for: ',description)
    print('Edge used for calibration: ',Edge)
    print('Crystal used for calibration: ',xtal)
    print('Bragg angle offset: ', B_edge-B[edge_index],'deg. (CHX coordinate system: ',-(B_edge-B[edge_index]),'deg.)')
    print('=> move Bragg to ',-B[edge_index],'deg. and set value to ',-Bragg_Edge,'deg.')
    print( 'Energy offset: ',Eexp[edge_index]-Edge_data[Edge],' keV')
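# Usage sketches (the file path is hypothetical): process a saved scan file,
# or pass a databroker header directly:
E_calibration('/tmp/Escan_Cu.dat', Edge='Cu')
E_calibration(db[-1], Edge='Cu', xtal='Si111cryo')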