Example #1
# These examples assume numpy/matplotlib plus several module-level names
# (run_database, numpy_dir, prefix, save_dir) and helpers (get_numpy_arr,
# get_run_names, remove_temp_cal) defined elsewhere in the source module.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm


def get_meas_data(det_no, run_name, database_dir, exp_cal_term, beam_4_mev):
    # list of groomed numpy file names for this run
    file_names = get_numpy_arr(database_dir, run_database, run_name, numpy_dir,
                               prefix, True)

    ql = []
    for data_index, datum in enumerate(file_names):
        print(data_index, datum)
        f_in = np.load(numpy_dir + datum)
        data = f_in['data']

        # pat/plastic det 0, bvert det 1, cpvert det 2
        scatter_index = np.where(data['det_no'] == det_no)[0]
        ql_det = remove_temp_cal(data['ql'][scatter_index], exp_cal_term)
        ql.extend(ql_det)
    if beam_4_mev:
        max_data = 35000
    else:
        max_data = 12000
    bin_width = max_data / 500.
    meas_hist, meas_bin_edges = np.histogram(ql,
                                             bins=np.arange(
                                                 0, max_data + bin_width,
                                                 bin_width))
    meas_bin_centers = (meas_bin_edges[:-1] + meas_bin_edges[1:]) / 2
    meas_data = np.array((meas_bin_centers, meas_hist))

    #plt.figure()
    #plt.plot(meas_bin_centers, meas_hist)
    #plt.show()
    return meas_data
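The binning at the end of get_meas_data is worth seeing in isolation: fixed-width bin edges are built with np.arange, histogrammed, and converted to bin centers. A minimal, self-contained sketch of the same pattern, with synthetic values standing in for the calibrated ql list:

import numpy as np

# synthetic light-output values standing in for the calibrated ql list
rng = np.random.default_rng(0)
ql = rng.exponential(scale=3000., size=10000)

max_data = 12000             # the 4 MeV beam branch above uses 35000
bin_width = max_data / 500.  # 500 fixed-width bins
edges = np.arange(0, max_data + bin_width, bin_width)

meas_hist, meas_bin_edges = np.histogram(ql, bins=edges)
meas_bin_centers = (meas_bin_edges[:-1] + meas_bin_edges[1:]) / 2
meas_data = np.array((meas_bin_centers, meas_hist))
print(meas_data.shape)  # (2, 500): one row of centers, one row of counts

Using max_data + bin_width as the arange stop makes the final edge land on max_data, giving exactly 500 bins.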
Example #2
def plot(database_dir, names, det_no):
    runs = get_run_names(names, database_dir, run_database)
    plt.figure()
    colors = cm.viridis(np.linspace(0, 1, len(runs)))
    for i, r in enumerate(runs):
        if 'na_' in r:
            continue
        print(r)
        file_names = get_numpy_arr(database_dir, run_database, r, numpy_dir,
                                   prefix, True)

        ql = []
        for data_index, datum in enumerate(file_names):
            print(data_index, datum)
            f_in = np.load(numpy_dir + datum)
            data = f_in['data']
            # pat/plastic det 0, bvert det 1, cpvert det 2
            ql_det = data['ql'][np.where(data['det_no'] == det_no)]
            ql.extend(ql_det)

        plt.hist(ql,
                 bins=1000,
                 histtype='step',
                 label=r,
                 density=True,  # 'normed' was removed in matplotlib 3.x
                 color=colors[i])
        plt.plot([0.476] * 10,
                 np.linspace(0, 4, 10),
                 'k--',
                 linewidth=0.5,
                 alpha=0.25)
        plt.xlim(0, 1.3)
        plt.title(r)
        plt.legend()
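The overlay technique in plot (one normalized step histogram per run, colored along viridis) works standalone; here is a minimal sketch with made-up run names and data. Note density=True, which replaces the normed keyword removed in matplotlib 3.x, and the vertical reference at 0.476 MeVee, presumably the Cs-137 Compton edge:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

rng = np.random.default_rng(1)
runs = {'run_a': rng.normal(0.50, 0.10, 5000),   # made-up data
        'run_b': rng.normal(0.60, 0.12, 5000)}

plt.figure()
colors = cm.viridis(np.linspace(0, 1, len(runs)))
for color, (name, ql) in zip(colors, runs.items()):
    plt.hist(ql, bins=1000, histtype='step', label=name,
             density=True, color=color)
# dashed reference line, as in the function above
plt.plot([0.476] * 10, np.linspace(0, 4, 10), 'k--',
         linewidth=0.5, alpha=0.25)
plt.xlim(0, 1.3)
plt.legend()
plt.show()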
Example #3
def cal_interp(shift_terms, spread_terms, start_file, stop_file, run_names,
               database_dir):
    ''' Interpolates between calibrations to get a calibration for each measurement.
        Pass start_file (beginning calibration) and stop_file (end calibration).
        Returns the interpolated shift terms, spread terms, and groomed numpy file names.
    '''
    #print(run_names)
    run = get_run_names([run_names], database_dir,
                        run_database)  # all files to parse
    groomed_arrays = []
    #print('\nFiles between start and stop:')
    for i, r in enumerate(run):
        #print(r)
        cal_names = ('cs', 'na', 'co')
        if any(cal_name in r for cal_name in cal_names):
            continue
        groomed_arrays.extend(
            get_numpy_arr(database_dir, run_database, r, numpy_dir, prefix,
                          True))

    num_files = float(len(groomed_arrays))
    print('\nshift_terms, spread_terms:', shift_terms, spread_terms)

    # accounts for the end of the 11 MeV run, which has no end calibration
    if shift_terms[0] == shift_terms[1]:
        #print([shift_terms[0]] * len(groomed_arrays))
        return ([shift_terms[0]] * len(groomed_arrays),
                [spread_terms[0]] * len(groomed_arrays), groomed_arrays)

    else:
        step_shift = (max(shift_terms) - min(shift_terms)) / (num_files - 1)
        interp_shift = np.arange(min(shift_terms),
                                 max(shift_terms) + step_shift / 2, step_shift)

        # account for positive or negative change in shift parameter
        if shift_terms[0] > shift_terms[1]:
            interp_shift = interp_shift[::-1]

        step_spread = (max(spread_terms) - min(spread_terms)) / (num_files - 1)
        interp_spread = np.arange(min(spread_terms),
                                  max(spread_terms) + step_spread / 2,
                                  step_spread)

        if spread_terms[0] > spread_terms[1]:
            interp_spread = interp_spread[::-1]

        print('interp shift[0], shift[-1]:', interp_shift[0], interp_shift[-1])
        print('interp spread[0], spread[-1]:', interp_spread[0],
              interp_spread[-1])
        return interp_shift, interp_spread, groomed_arrays
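The branchy arange/reversal logic in cal_interp can be reproduced with np.linspace, which spaces num_files values from the start term to the stop term and handles direction automatically; it also sidesteps the num_files - 1 division when only one file exists. A minimal sketch with made-up terms:

import numpy as np

shift_terms = (1.20, 0.90)     # start and stop intercept terms (made up)
spread_terms = (0.050, 0.065)  # start and stop slope terms (made up)
num_files = 6                  # number of groomed files between calibrations

# one calibration per file, linearly spaced from start to stop
interp_shift = np.linspace(shift_terms[0], shift_terms[1], num_files)
interp_spread = np.linspace(spread_terms[0], spread_terms[1], num_files)

print(interp_shift)   # 1.20 ... 0.90, decreasing, like the reversal branch
print(interp_spread)  # 0.050 ... 0.065, increasing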
Example #4
def get_coinc_counts(names, database_dir, run_database, numpy_dir, window):

    runs = get_run_names(names, database_dir, run_database)
    for index, r in enumerate(runs):
        print('\n', r)

        #cal_files = ['cs', 'na', 'co']
        #if any(cal in r for cal in cal_files):
        #    print('Skipping calibration file:', r)
        #    continue

        # parse data for ql and tof
        print(numpy_dir + r)
        data_files = get_numpy_arr(database_dir,
                                   run_database,
                                   r,
                                   numpy_dir,
                                   'comp_coinc_',
                                   groomed=True)

        ql_dets, del_t_dets = [], []
        for data_index, datum in enumerate(data_files):
            print(data_index, datum)
            f_in = np.load(numpy_dir + datum)
            data = f_in['data']
            scatter_index = np.where(data['det_no'] == 0)[0]  # scatter det
            backing = np.where(data['det_no'] == 1)[0]  # backing det

            # search for scatter-to-backing hits with delta t < window,
            # walking both time-ordered index arrays with two pointers
            j = 0
            i = 0
            delta_t, ql = [], []
            while j < len(scatter_index) and i < len(backing):
                if scatter_index[j] < backing[i]:
                    coincidence = data['trig_offset'][backing[i]] + data['trig_time'][backing[i]] - \
                                  data['trig_offset'][scatter_index[j]] - data['trig_time'][scatter_index[j]]
                    if coincidence < window:
                        delta_t.append(coincidence)
                        ql.append(data['ql'][scatter_index[j]])
                    j += 1
                else:
                    i += 1

            ql_dets.extend(ql)
            del_t_dets.extend(delta_t)

        # save once per run, after all files have been processed
        np.save(save_dir + 'coinc_data_' + str(window) + 'ns',
                (ql_dets, del_t_dets))
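The coincidence search is a two-pointer sweep over two index arrays that are already time-ordered. The same logic is easier to verify on plain timestamp arrays; a self-contained sketch with synthetic times in ns:

import numpy as np

# time-ordered event timestamps (ns) for the two detectors (synthetic)
scatter_t = np.array([10., 55., 120., 300.])
backing_t = np.array([12., 130., 400.])
window = 100.  # coincidence window (ns)

pairs = []
i = j = 0
while j < len(scatter_t) and i < len(backing_t):
    if scatter_t[j] < backing_t[i]:
        delta_t = backing_t[i] - scatter_t[j]
        if delta_t < window:
            pairs.append((scatter_t[j], backing_t[i], delta_t))
        j += 1  # scatter event handled; move to the next one
    else:
        i += 1  # backing event precedes the scatter event; advance it

print(pairs)  # [(10., 12., 2.), (55., 130., 75.), (120., 130., 10.)]

As in get_coinc_counts, the backing pointer only advances when it falls behind, so several scatter events can pair with the same backing event.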
Example #5
def update_rec(shift, spread, names, det_no, exp_cal_term, database_dir):
    ''' Takes the mean slope (spread) and intercept (shift) terms and applies
        the linear calibration to the data.
    '''
    print('    saving to:', numpy_dir + 'raw_cleaned/')

    runs = get_run_names((names, ), database_dir,
                         run_database)  # all files to parse
    groomed_arrays = []

    # individual angular run names from run_database.txt
    for i, r in enumerate(runs):
        print('        ', r)
        cal_names = ('cs', 'na', 'co')
        if any(cal_name in r for cal_name in cal_names):
            continue
        np_files = get_numpy_arr(database_dir, run_database, r, numpy_dir,
                                 prefix, True)

        # individual np files for a given rotation
        for f in np_files:
            data = np.load(numpy_dir + f)
            rec = data['data']
            cal_det_index = np.where(rec['det_no'] == det_no)[0]
            # remove rough calibration for qs and ql
            rec['ql'][cal_det_index] = remove_temp_cal(
                rec['ql'][cal_det_index], exp_cal_term)
            rec['qs'][cal_det_index] = remove_temp_cal(
                rec['qs'][cal_det_index], exp_cal_term)
            # apply calibration for qs and ql
            rec['ql'][cal_det_index] = (rec['ql'][cal_det_index] -
                                        shift) / spread
            rec['qs'][cal_det_index] = (rec['qs'][cal_det_index] -
                                        shift) / spread
            name = f.split('.')[0]  # file name without the extension
            np.savez_compressed(numpy_dir + 'raw_cleaned/' + name + '_raw',
                                data=rec)
            print('            ', name, 'saved')
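update_rec removes a rough per-experiment calibration and then applies the interpolated linear one. The arithmetic is a simple round-trip; a minimal sketch with a hypothetical remove_temp_cal (the real helper is not shown in these examples, so a linear rough calibration is assumed purely for illustration):

import numpy as np

def remove_temp_cal(x, exp_cal_term):
    # HYPOTHETICAL stand-in: undo a rough linear calibration x_cal = x_raw * exp_cal_term
    return x / exp_cal_term

exp_cal_term = 2.5e-4          # assumed rough calibration factor
shift, spread = 120.0, 9500.0  # interpolated intercept and slope (made up)

ql = np.array([0.30, 0.48, 1.10])            # rough-calibrated values
ql_raw = remove_temp_cal(ql, exp_cal_term)   # back to raw channel units
ql_new = (ql_raw - shift) / spread           # apply the new calibration
print(ql_new)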