def save_constant_absorbed_specs():
    """Pickle absorbed-fluence-binned spectra to CONSTANT_ABSORPTION_FILE."""
    # Fluence bins chosen to give similar statistics across the data sets
    bin_edges_lowest = [(0, 0.0329)]
    bin_edges_low = [(0, 2.743), (2.743, 11.830)]
    bin_edges_high = [(15, 25), (25, 95)]
    data_sets = []
    data_lowest_fluence = abs_ana.get_data(LOWEST_FLUENCE_RUNS, True,
                                           (CUTOFF_BOTTOM, LOWEST_CUTOFF))
    data_sets.extend(
        get_fluence_binned_data(data_lowest_fluence,
                                bin_edges_lowest,
                                absorbed=True))
    # Use the higher fluences of this set by placing the lower cutoff at the
    # 50th percentile rather than the 30th. (Rerunning with the 30th-percentile
    # lower bound gives the same result [no change from ALS] within
    # experimental error.)
    data_low_fluence = abs_ana.get_data(LOW_FLUENCE_RUNS, True,
                                        (CUTOFF_BOTTOM, LOW_CUTOFF))
    data_sets.extend(
        get_fluence_binned_data(data_low_fluence, bin_edges_low,
                                absorbed=True))
    data_high_fluence = abs_ana.get_data(HIGH_FLUENCE_RUNS, True,
                                         (CUTOFF_BOTTOM, HIGH_CUTOFF))
    data_sets.extend(
        get_fluence_binned_data(data_high_fluence,
                                bin_edges_high,
                                absorbed=True))
    spectra = []
    for data_set in data_sets:
        spec = abs_ana.get_spectra(data_set, bins=PHOT_BIN_EDGES)
        spectra.append(spec)
    with open(CONSTANT_ABSORPTION_FILE, 'wb') as pickle_on:
        pickle.dump(spectra, pickle_on)

def save_incident_specs():
    """Pickle incident-fluence-binned spectra to INCIDENT_SPEC_FILE."""
    # Fluence bins chosen to give similar statistics across the data sets
    bin_edges_lowest = [(-0.399, 1)]
    bin_edges_low = [(-1, 100)]
    bin_edges_high = [(25, 65), (65, 160)]
    data_sets = []
    data_lowest_fluence = abs_ana.get_data(LOWEST_FLUENCE_RUNS, True,
                                           (CUTOFF_BOTTOM, LOWEST_CUTOFF))
    data_sets.extend(
        get_fluence_binned_data(data_lowest_fluence, bin_edges_lowest))
    # Use the higher fluences of this set by placing the lower cutoff at the
    # 50th percentile rather than the 30th:
    data_low_fluence = abs_ana.get_data(LOW_FLUENCE_RUNS, None,
                                        (CUTOFF_BOTTOM, LOW_CUTOFF))
    data_sets.extend(get_fluence_binned_data(data_low_fluence, bin_edges_low))
    data_high_fluence = abs_ana.get_data(HIGH_FLUENCE_RUNS, True,
                                         (CUTOFF_BOTTOM, HIGH_CUTOFF))
    data_sets.extend(get_fluence_binned_data(data_high_fluence,
                                             bin_edges_high))
    spectra = []
    for data_set in data_sets:
        spec = abs_ana.get_spectra(data_set, bins=PHOT_BIN_EDGES)
        spectra.append(spec)
    with open(INCIDENT_SPEC_FILE, 'wb') as pickle_on:
        pickle.dump(spectra, pickle_on)
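# A minimal sketch (not part of the original module) of reading the pickled
# spectra back; it assumes only the pickle module and the same path constants
# used by the two save_* functions above.
def load_saved_specs(file_name=CONSTANT_ABSORPTION_FILE):
    """Load the list of spectra written by save_constant_absorbed_specs
    or save_incident_specs."""
    with open(file_name, 'rb') as pickle_off:
        return pickle.load(pickle_off)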
Example 3
def plot_profile_lk30(run, coord='sam_x'):
    """Plot and save the normalized spot-size-scan profile for one LK30 run."""
    data = abs_ana.get_data([run], None, (0, 100))
    plot_profile(data[coord], data['andor'], data['mcp'])
    plt.title('LK30 Spot size scan, run ' + str(run))
    plt.xlabel(coord + ' (mm)')
    plt.ylabel('Normalized Value')
    save_file_start = '../plots/2016_04_07_spot_size_scan_'
    save_file = ''.join([save_file_start, coord[-1], '_run', str(run), '.png'])
    plt.savefig(save_file)
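# Hypothetical usage sketch: the run number is a placeholder, and coord can be
# any motor column whose last character names the scan axis in the file name.
plot_profile_lk30(42, coord='sam_x')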
def calculate_damage_specs(fluence_set):
    """Return one spectrum per (low, high) 'max_prev_fluence' range in fluence_set."""
    max_prev_fluence_key = 'max_prev_fluence'
    data = abs_ana.get_data(HIGH_FLUENCE_RUNS,
                            sample_filter=None,
                            mcp_filter=(CUTOFF_BOTTOM, HIGH_CUTOFF))
    spectra = []
    for fluence_lo, fluence_hi in fluence_set:
        # Select shots whose maximum previous fluence falls in this range
        in_range = ((data[max_prev_fluence_key] >= fluence_lo) &
                    (data[max_prev_fluence_key] < fluence_hi))
        data_set = data[in_range]
        spec = abs_ana.get_spectra(data_set, bins=PHOT_BIN_EDGES)
        spectra.append(spec)
    return spectra
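# Hypothetical usage sketch: the (low, high) max_prev_fluence edges below are
# illustrative, not values from the original analysis.
damage_spectra = calculate_damage_specs([(0, 25), (25, 95)])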
Example 5
def get_no_sample_data():
    """Return normalized raw and corrected I0 (MCP) and I1 (Andor) traces for run 108."""
    data = abs_ana.get_data([108], mcp_filter=(0, 100))
    num_points = 100
    # Normalize the traces for plotting so they share a comparable scale:
    mcp_norm = np.amax(data['mcp'][:num_points])
    andor_norm = np.amax(data['andor'][:num_points])
    mcp_raw = data['mcp_uncorrected'][:num_points]/mcp_norm
    mcp = data['mcp'][:num_points]/mcp_norm
    andor_raw = data['andor'][:num_points]/andor_norm
    andor = andor_raw    # Andor values are not corrected
    return {'Raw I0': mcp_raw,
            'Corrected I0': mcp,
            'Raw I1': andor_raw,
            'Corrected I1': andor}
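# A minimal plotting sketch, assuming matplotlib.pyplot is available as plt
# (as in plot_profile_lk30 above); it only reuses the dict returned above.
def plot_no_sample_traces():
    traces = get_no_sample_data()
    for label, trace in traces.items():
        plt.plot(trace, label=label)
    plt.legend()
    plt.xlabel('Shot index')
    plt.ylabel('Normalized signal')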
def get_lcls_xas(runs,
                 phot_max=10000,
                 mcp_filter=(CUTOFF_BOTTOM, 95),
                 sample_filter=None,
                 fluence_filter=None):
    """Return an LCLS XAS spectrum restricted to artifact-free photon energies."""
    lcls_data = abs_ana.get_data(runs, sample_filter, mcp_filter=mcp_filter)
    if fluence_filter is not None:
        lcls_data = lcls_data[lcls_data['fluence_in'] > fluence_filter[0]]
        lcls_data = lcls_data[lcls_data['fluence_in'] < fluence_filter[1]]
    lcls_spec = abs_ana.get_spectra(lcls_data, bins=50)
    lcls_xas = lcls_spec['xas']
    # Keep only the LCLS photon energies where we know there
    # are no artifacts
    good = (lcls_xas['phot'] < phot_max) & (~np.isnan(lcls_xas['spec']))
    lcls_xas['spec'] = lcls_xas['spec'][good]
    lcls_xas['phot'] = lcls_xas['phot'][good]
    return lcls_xas
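# Hypothetical usage sketch: the run list and fluence window are placeholders,
# and plt is assumed imported as in plot_profile_lk30 above.
lcls_xas = get_lcls_xas([108], fluence_filter=(0, 25))
plt.plot(lcls_xas['phot'], lcls_xas['spec'])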
Example 7
def get_no_sample_data():
    """Return data for the no-sample run (run 108) over the full MCP range."""
    no_sample_data = abs_ana.get_data([108], mcp_filter=(0, 100))
    return no_sample_data