# Example #1
def update_rec(shift, spread, names, det_no, exp_cal_term, database_dir):
    ''' takes the mean slope (spread) and intercept (shift) terms and applies the linear calibration to the data
    '''
    print '    saving to:', numpy_dir + 'raw_cleaned/'

    runs = get_run_names((names,), database_dir, run_database) # all files to parse
    groomed_arrays = []

    # individual angular run names from run_database.txt
    for i, r in enumerate(runs):
        print '        ', r
        cal_names = ('cs', 'na', 'co')
        if any(cal_name in r for cal_name in cal_names):
               continue
        np_files = get_numpy_arr(database_dir, run_database, r, numpy_dir, prefix, True)

        # individual np files for a given rotation
        for f in np_files:
            data = np.load(numpy_dir + f)
            rec = data['data']
            cal_det_index = np.where(rec['det_no'] == det_no)[0]
            # remove rough calibration for qs and ql
            rec['ql'][cal_det_index] = remove_temp_cal(rec['ql'][cal_det_index], exp_cal_term)
            rec['qs'][cal_det_index] = remove_temp_cal(rec['qs'][cal_det_index], exp_cal_term)
            # apply calibration for qs and ql
            rec['ql'][cal_det_index] = (rec['ql'][cal_det_index] - shift)/spread
            rec['qs'][cal_det_index] = (rec['qs'][cal_det_index] - shift)/spread
            tmp = f.split('.')
            name = tmp[:-1][0]
            np.savez_compressed(numpy_dir + 'raw_cleaned/' + name + '_raw', data=rec)
            print '            ', name, 'saved'
# Example #2
def plot(database_dir, names, det_no):
    run = get_run_names(names, database_dir, run_database)
    plt.figure()
    colors = cm.viridis(np.linspace(0, 1, len(run)))
    for i, r in enumerate(run):
        if 'na_' in r:
            continue
        print r
        data = get_numpy_arr(database_dir, run_database, r, numpy_dir, prefix,
                             True)

        ql = []
        for data_index, datum in enumerate(data):
            print data_index, datum
            f_in = np.load(numpy_dir + datum)
            data = f_in['data']
            ql_det = data['ql'][np.where(
                (data['det_no'] == det_no
                 ))]  # pat/plastic det 0, bvert det 1, cpvert det 2
            ql.extend(ql_det)

        plt.hist(ql,
                 bins=1000,
                 histtype='step',
                 label=r,
                 normed=True,
                 color=colors[i])
        plt.plot([0.476] * 10,
                 np.linspace(0, 4, 10),
                 'k--',
                 linewidth=0.5,
                 alpha=0.25)
        plt.xlim(0, 1.3)
        plt.title(r)
        plt.legend()
def cal_interp(shift_terms, spread_terms, start_file, stop_file, run_names,
               database_dir):
    ''' Interpolates between calibrations to get calibration for each measurement.
        Pass start_file (beginning calibration) and stop_file (end_calibration).
        returns numpy array with groomed numpy file name and calibration factor.
    '''
    #print run_names
    run = get_run_names([run_names], database_dir,
                        run_database)  # all files to parse
    groomed_arrays = []
    #print '\nFiles between start and stop:'
    for i, r in enumerate(run):
        #print r
        cal_names = ('cs', 'na', 'co')
        if any(cal_name in r for cal_name in cal_names):
            continue
        groomed_arrays.extend(
            get_numpy_arr(database_dir, run_database, r, numpy_dir, prefix,
                          True))

    num_files = float(len(groomed_arrays))
    print '\nshift_terms, spread_terms:', shift_terms, spread_terms

    if shift_terms[0] == shift_terms[
            1]:  # accounts from end of 11MeV run with no end calibration
        #print [shift_terms[0]]*len(groomed_arrays)
        return [shift_terms[0]] * len(groomed_arrays), [
            spread_terms[0]
        ] * len(groomed_arrays), groomed_arrays

    else:
        step_shift = (max(shift_terms) - min(shift_terms)) / (num_files - 1)
        interp_shift = np.arange(min(shift_terms),
                                 max(shift_terms) + step_shift / 2, step_shift)

        # account for positive or negative change in shift parameter
        if shift_terms[0] > shift_terms[1]:
            interp_shift = interp_shift[::-1]

        step_spread = (max(spread_terms) - min(spread_terms)) / (num_files - 1)
        interp_spread = np.arange(min(spread_terms),
                                  max(spread_terms) + step_spread / 2,
                                  step_spread)

        if spread_terms[0] > spread_terms[1]:
            interp_spread = interp_spread[::-1]

        print 'interp shift[0], shift[-1]:', interp_shift[0], interp_shift[-1]
        print 'interp spread[0], spread[-1]:', interp_spread[0], interp_spread[
            -1]
        return interp_shift, interp_spread, groomed_arrays
# Example #4
def get_coinc_counts(names, database_dir, run_database, numpy_dir, window):

    runs = get_run_names(names, database_dir, run_database)
    for index, r in enumerate(runs):
        print '\n', r

        #cal_files = ['cs', 'na', 'co']
        #if any(cal in r for cal in cal_files):
        #    print 'Skipping calibration file:', r
        #    continue

        # parse data for ql and tof
        print numpy_dir + r
        data_files = get_numpy_arr(database_dir,
                                   run_database,
                                   r,
                                   numpy_dir,
                                   'comp_coinc_',
                                   groomed=True)

        ql_dets, del_t_dets, qs_dets, backing_idx_dets = [], [], [], []
        for data_index, datum in enumerate(data_files):
            print data_index, datum
            f_in = np.load(numpy_dir + datum)
            data = f_in['data']
            scatter_index = np.where(data['det_no'] == 0)[0]

            det_index = np.where(data['det_no'] == 1)
            det_data = data[det_index[0]]
            backing = det_index[0]

            # search for scatter to backing hits with delta t < 100ns
            j = 0
            i = 0
            delta_t, ql, qs, backing_idx = [], [], [], []
            while j < (len(scatter_index) - 1) and i < (len(backing) - 1):
                if (scatter_index[j] < backing[i]):
                    coincidence = data['trig_offset'][backing[i]] + data['trig_time'][backing[i]] - \
                                  data['trig_offset'][scatter_index[j]] - data['trig_time'][scatter_index[j]]
                    if coincidence < window:
                        delta_t.append(coincidence)
                        ql.append(data['ql'][scatter_index[j]])
                    j += 1
                else:
                    i += 1

            ql_dets.extend(ql)
            del_t_dets.extend(delta_t)

            np.save(save_dir + 'coinc_data_' + str(window) + 'ns',
                    (ql_dets, del_t_dets))
# Example #5
def simultaneous(adc_ranges, det_no, names, database_dir, sim_files, sim_numpy_files, exp_cal_term, spread, 
                 beam_4_mev, print_info, show_plots):
    ''' simultaneously fits spectra

    Iterates over the calibration runs for each run group in *names* and
    simultaneously fits the measured Cs-137 / Na-22 (/ Co-60) spectra
    against simulated spectra via spectra_fit, collecting the fit results.

    Parameters:
        adc_ranges     -- ((cs_lo, cs_hi), (na_lo, na_hi), (co_lo, co_hi))
                          ADC fit windows, in that order (see unpacking below)
        det_no         -- detector number passed to get_meas_data
        names          -- iterable of run-group names for get_run_names
        database_dir   -- run database directory
        sim_files      -- simulation log files; indexed [0]=cs, [1]=co, [2]=na
        sim_numpy_files-- numpy files paired with sim_files (same indexing)
        exp_cal_term   -- rough calibration term passed to get_meas_data
        spread         -- initial spread used to build fit parameters
                          (NOTE(review): rebound by a fit result below)
        beam_4_mev     -- flag passed to get_meas_data
        print_info     -- verbosity flag forwarded to spectra_fit
        show_plots     -- if truthy, plots are shown after each run group

    Returns:
        res_arr -- list of spectra_fit results accumulated over all groups

    NOTE(review): correct behavior depends on run ordering within each
    group -- cs_data / na_data / fit_params are created in earlier loop
    iterations and consumed in later ones.  If a 'co_' run is reached
    before a 'na_' run (other than the idx == 1 special case) fit_params
    would be unbound -- confirm run_database ordering guarantees this
    cannot happen.
    '''

    res_arr = []
    for name in names:
        runs = get_run_names(name, database_dir, run_database)
        # a Co-60 run anywhere in the group selects the co-calibrated path
        if any('co' in r for r in runs):
            co_cal = True
        else:
            co_cal = False

        # adc ranges: (low, high) fit windows per source
        cs_range = adc_ranges[0]
        na_range = adc_ranges[1]
        co_range = adc_ranges[2]

        for idx, run in enumerate(runs):
            # explicitly excluded run
            if 'cpvert_11MeV_neg15tiltend' in run:
                continue
            # check if co in runs
            # skip run names with more than 5 underscore-separated fields
            tmp1 = run.split('_')
            if len(tmp1) > 5:  
                continue

            print '\n---------------------------------------------------'
            print  run

            if 'cs_' in run:
                # Cs-137: load sim + measured spectra, restrict to fit window;
                # stored in cs_data for use by later iterations
                sim_data_cs = get_sim_data((sim_files[0],sim_numpy_files[0]))
                meas_data_full_cs = get_meas_data(det_no, run, database_dir, exp_cal_term, beam_4_mev)
                meas_data_cs = fit_range(cs_range[0], cs_range[1], meas_data_full_cs,'meas_data_cs')  #0.35,0.7
                cs_data = [sim_data_cs, meas_data_full_cs, meas_data_cs]
                continue

            if 'na_' in run:
                # Na-22: note sim_files[2]/sim_numpy_files[2] hold the na spectra
                sim_data_na = get_sim_data((sim_files[2],sim_numpy_files[2]))
                meas_data_full_na = get_meas_data(det_no, run, database_dir, exp_cal_term, beam_4_mev)
                meas_data_na = fit_range(na_range[0], na_range[1], meas_data_full_na,'meas_data_na')  #0.95,1.5
                na_data = [sim_data_na, meas_data_full_na, meas_data_na]       
                # fit_params built here is reused by the 'co_' branch below
                fit_params = assign_fit_params(spread)       
                fit_params.add('y_scale_2', value=0.00, min=1e-5, max=0.1, vary=False)        

            if 'co_' in run:
                print 'found co'
                # Co-60: sim_files[1]/sim_numpy_files[1] hold the co spectra
                sim_data_co = get_sim_data((sim_files[1],sim_numpy_files[1]))
                meas_data_full_co = get_meas_data(det_no, run, database_dir, exp_cal_term, beam_4_mev)
                meas_data_co = fit_range(co_range[0], co_range[1], meas_data_full_co, 'meas_data_co')   #0.9,1.5
                co_data = [sim_data_co, meas_data_full_co, meas_data_co]
                
                if idx == 1:   
                    # no na22 case
                    fit_params = assign_fit_params(spread)
                    fit_params.add('y_scale_2', value=0.00, min=1e-5, max=0.1, vary=False) 
                    # NOTE(review): this rebinds the 'spread' parameter to the
                    # fitted value, so subsequent assign_fit_params(spread)
                    # calls seed from the latest fit -- presumably intentional
                    # chaining, but confirm; result is NOT appended to res_arr
                    # here, unlike the other branches
                    shift, spread, res = spectra_fit(fit_params, cs_data, co_data, print_info=print_info, show_plots=show_plots)
                else:
                    # cs, na, and co case: extend the na-branch fit_params with
                    # a third component for the co spectrum
                    fit_params.add('alpha_3', value=0.0, min=0., max=20., vary=False)
                    fit_params.add('beta_3', value=0.03, min=0, max=0.05, vary=True)
                    fit_params.add('gamma_3', value=0.0, min=0, max=20, vary=False)
                    fit_params.add('c1_3', value=0.01,  vary=True)
                    fit_params.add('c2_3', value=-1,  vary=True)
                    fit_params.add('y_scale_3', value=0.00, min=1e-5, max=0.1, vary=False)
                    res = spectra_fit(fit_params, cs_data, na_data, co_data, print_info=print_info, show_plots=show_plots)

                    res_arr.append(res)
             
            # NOTE(review): this runs on EVERY non-cs iteration when the group
            # has no co run (4 MeV case), re-fitting cs+na per run -- confirm
            # the repeated appends to res_arr are intended
            if co_cal == False:
                # cs, na case
                res = spectra_fit(fit_params, cs_data, na_data, print_info=print_info, show_plots=show_plots)

                # 4 mev no co spectra
                res_arr.append(res)

            if show_plots:
                plt.show()

    return res_arr
             ('cs_bvert_4MeV_neg15tilt', 'na_bvert_4MeV_neg15tilt', 'cs_cpvert_4MeV_15tilt', 'na_cpvert_4MeV_15tilt'),
             ('cs_cpvert_4MeV_15tilt', 'na_cpvert_4MeV_15tilt', 'cs_cpvert_4MeV_neg15tilt', 'na_cpvert_4MeV_neg15tilt'),
             ('cs_cpvert_4MeV_neg15tilt', 'na_cpvert_4MeV_neg15tilt', 'cs_bvert_cpvert_end', 'na_bvert_cpvert_end', 'co_bvert_cpvert_end') ]
    #names = [('na_bvert_4MeV_0tilt',)]
    spread = 25000
    adc_ranges = ((7500, 14000), (19000, 30000), (16000, 32000))
    '''

    sim_files = [
        'cs_spec_polimi.log', 'co_spec_polimi.log', 'na_spec_polimi.log'
    ]
    sim_numpy_files = ['cs_spec.npy', 'co_spec.npy',
                       'na_spec.npy']  # numpy files with data from sim_files

    for exp_cal_term, det_no, name in zip(exp_cal_terms, det_nos, names):
        run = get_run_names(name, database_dir, run_database)

        #single(adc_ranges, det_no, name, database_dir, sim_files, sim_numpy_files, run, exp_cal_term, spread, beam_4mev, print_info=True, show_plots=True)
        simultaneous(adc_ranges,
                     det_no,
                     name,
                     database_dir,
                     sim_files,
                     sim_numpy_files,
                     run,
                     exp_cal_term,
                     spread,
                     beam_4mev,
                     save_arrays=False,
                     print_info=True,
                     show_plots=True)