Beispiel #1
0
def generate_templates(inputfilepath, series, filename_noise_psds, vec_r_lim,
                       mat_theta_lim, filename_templates):
    """Build NxM optimal-filter templates from triggered events.

    Reads traces from *inputfilepath*/*series*, accumulates them in a
    TemplateGeneratorNxM partitioned by (r, theta) bins, and saves the
    resulting templates to *filename_templates*.

    inputfilepath       -- directory containing the raw data files
    series              -- series identifier(s) passed to DataReader
    filename_noise_psds -- file with the noise cross-PSDs (see get_noise_psds)
    vec_r_lim           -- radial bin limits for the template partition
    mat_theta_lim       -- angular bin limits of the partition
    filename_templates  -- output file for the generated templates
    """
    print('generate_templates')

    # Accept essentially any energy; the partition is done in (r, theta).
    E_min = 0.
    E_max = 1E12

    V = get_noise_psds(filename_noise_psds)

    gen = TemplateGeneratorNxM(V, calc_r, calc_theta, E_min, E_max, vec_r_lim,
                               mat_theta_lim)

    dr = DataReader()
    dr.OpenFile(inputfilepath, series, 0)

    event_count = 0

    while dr.LoadEvent(trigger='Trigger'):
        gen.IncludeEvent(dr.GetTraces())
        event_count += 1

        # Progress monitor (== 1 so the very first event is also reported).
        if event_count % STEP_MONITOR == 1:
            print('Event', event_count)

    dr.CloseFile()

    templates = gen.GetTemplates()

    # GetTemplates() yields a list on success; anything else means no
    # templates could be built, so nothing is saved.
    if isinstance(templates, list):
        map_bins_part = gen.GetMapBinsPart()
        save_templates_nxm(templates, E_min, E_max, map_bins_part,
                           filename_templates)

    gen.Draw(PATH + '/png')
Beispiel #2
0
    def __init__(self, dt, t_pre, U, V, calc_invV=True):
        """Precompute the NxM optimal-filter quantities.

        dt        -- width of the time bins
        t_pre     -- time interval between the start of the trace and the
                     trigger (positive by convention)
        U         -- templates, indexed as U[template][channel][time bin]
        V         -- noise cross-correlation (PSD) matrix, indexed as
                     V[channel][channel][frequency bin]
        calc_invV -- if True, invert V bin by bin and cache the result in
                     'inv_psd.gz'; otherwise try to load the cached inverse
        """
        self.dt = dt
        self.t_pre = t_pre
        # Index of the trigger bin inside the trace.
        self.n_trig = int(t_pre / dt)

        self.num_templates = len(U)
        self.num_channels = len(U[0])
        self.num_bins_t = len(U[0][0])

        self.loop_templates = range(self.num_templates)
        self.loop_channels = range(self.num_channels)
        self.loop_bins_t = range(self.num_bins_t)

        # Deep copy of the templates (decouples self.U from the caller's U).
        self.U = [[[U[i][a][n] for n in self.loop_bins_t]
                   for a in self.loop_channels] for i in self.loop_templates]
        # Per-template normalization: total integral over all channels.
        self.N = [
            sum([sum(U[i][a]) for a in self.loop_channels])
            for i in self.loop_templates
        ]

        # FFT of every template trace, one list per (template, channel).
        U_fft = [[np.fft.fft(U[i][a]).tolist() for a in self.loop_channels]
                 for i in self.loop_templates]

        if calc_invV:
            # Invert the channel x channel noise matrix frequency bin by
            # frequency bin and reorganize the result as V_inv[a][b][n].
            self.V_inv = [[[] for b in self.loop_channels]
                          for a in self.loop_channels]

            for n in self.loop_bins_t:
                V_n_inv = np.linalg.inv(
                    [[V[a][b][n] for b in self.loop_channels]
                     for a in self.loop_channels])

                for a in self.loop_channels:
                    for b in self.loop_channels:
                        self.V_inv[a][b].append(V_n_inv[a][b])
            # Cache the inverse so later runs can skip the inversion.
            save_noise_psds(self.V_inv, 'inv_psd.gz')
        else:
            try:
                self.V_inv = get_noise_psds('inv_psd.gz')
            except Exception:
                # Best effort, matching the original behavior: warn and
                # continue (later use of self.V_inv will then fail loudly).
                print("No inverted covariance matrix to be loaded")

        # F[i][a][n]: matched-filter kernel conj(U_i) * V^-1 per channel.
        self.F = []

        for i in self.loop_templates:
            self.F.append([])

            for a in self.loop_channels:
                self.F[-1].append([0.0 for n in self.loop_bins_t])

                for b in self.loop_channels:
                    for n in self.loop_bins_t:
                        self.F[-1][-1][n] += np.conj(
                            U_fft[i][b][n]) * self.V_inv[b][a][n]

        # P[i][j]: template overlap matrix conj(U_i) * V^-1 * U_j summed
        # over channel pairs and frequency bins.
        self.P = []

        for i in self.loop_templates:
            self.P.append([])

            for j in self.loop_templates:
                self.P[-1].append(0.0)

                for a in self.loop_channels:
                    for b in self.loop_channels:
                        for n in self.loop_bins_t:
                            self.P[-1][-1] += np.conj(
                                U_fft[i][a]
                                [n]) * U_fft[j][b][n] * self.V_inv[a][b][n]

        # P is real up to numerical noise; drop the imaginary part before
        # inverting.
        self.P = np.real(self.P).tolist()
        self.P_inv = np.linalg.inv(self.P).tolist()

        # Per-channel FFT of the current event, filled during processing.
        self.S_fft = [None for a in self.loop_channels]

        # Fit results: raw amplitudes/chisq/energy at the trigger time
        # ('A0', 'chisq0', 'E0', 't0') and after the time scan ('A',
        # 'chisq', 'E').
        self.result = {
            'A0': [None for i in self.loop_templates],
            'chisq0': None,
            'E0': None,
            't0': None,
            'A': [None for i in self.loop_templates],
            'chisq': None,
            'E': None
        }
Beispiel #3
0
def preview_data(inputfilepath, series, filename_noise_psds, stat_estimate):
    """Scatter-plot the (theta, r) distribution of clean single-pulse events.

    For every triggered event with exactly one prominent peak, estimates the
    per-channel amplitudes with a Wiener filter, converts them to the
    partition coordinates (r, theta), and draws the distribution together
    with the bin limits suggested by estimate_bins_part().

    inputfilepath       -- directory containing the raw data files
    series              -- series identifier(s) passed to DataReader
    filename_noise_psds -- file with the noise cross-PSDs
    stat_estimate       -- statistics target forwarded to estimate_bins_part
    """
    print('preview_data')

    # Accept essentially any energy.
    E_min = 0.
    E_max = 1E12

    V = get_noise_psds(filename_noise_psds)

    vd = vector_distribution()

    dr = DataReader()
    dr.OpenFile(inputfilepath, series)

    event_count = 0

    while dr.LoadEvent(trigger='Trigger'):
        S = dr.GetTraces()
        dataS = np.sum(np.asarray(S), axis=0)
        # After the conversion to amperes the numbers are too small for
        # find_peaks to work with, so rescale first.
        dataS = dataS * 1E7
        peaks, _ = find_peaks(dataS.transpose(), prominence=1, width=200)
        # Keep only events with exactly one prominent peak.
        if len(peaks) != 1:
            continue

        # Per-channel amplitude estimate via a scipy Wiener filter, using
        # the diagonal of the noise PSD matrix as the noise power.  Computed
        # only for events that survived the peak cut.
        noise_w = np.array([V[a][a] for a in range(len(S))])
        amps = [
            sum(wiener(S[a], mysize=75, noise=noise_w[a].real))
            for a in range(len(S))
        ]
        event_count += 1

        if event_count % STEP_MONITOR == 0:
            print('Event', event_count)

        # Reject events far out in the radial partition coordinate.
        if calc_r(amps) > 5:
            continue

        E = sum(amps)

        if E_min < E < E_max:
            vd.add(get_angle_std(calc_theta(amps)), calc_r(amps))

    dr.CloseFile()

    if vd.get_size() > 0:
        graph = TGraph(vd.get_size(), vd.get_array_x(), vd.get_array_y())

        lines = estimate_bins_part(stat_estimate, vd)
        for line in lines:
            line.SetLineColor(15)

        limits_x = [-math.pi, math.pi]
        limits_y = [
            min([line.GetY1() for line in lines]),
            max([line.GetY1() for line in lines])
        ]
        filename = PATH + '/png/preview_data.png'
        draw_graphs([graph], [2], 0.5, '#theta', 'r', limits_x, limits_y,
                    filename, lines)

        graph.Delete()
Beispiel #4
0
def apply_filters(filename_noise_psds, filename_templates, inputfilepath,
                  series, filename_root):
    """Run the NxM optimal filter over a series and save results to ROOT.

    Applies baseline and pulse-shape cuts, processes every surviving event
    with OFManagerNxM, and writes one 'NxM' tree entry per processed event
    (failed fits are flagged with -999999.0).

    filename_noise_psds -- file with the noise cross-PSDs
    filename_templates  -- file with the NxM templates (get_templates_nxm)
    inputfilepath       -- directory containing the raw data files
    series              -- series identifier(s) passed to DataReader
    filename_root       -- output ROOT file receiving the tree
    """
    print('apply_filters')

    V = get_noise_psds(filename_noise_psds)
    templates, E_min, E_max, map_bins_part = get_templates_nxm(
        filename_templates)

    man = OFManagerNxM(DT, T_PRE, templates, V, calc_r, calc_theta, E_min,
                       E_max, map_bins_part)

    dr = DataReader()
    dr.OpenFile(inputfilepath, series, 0)

    tm_nxm = tree_manager('NxM')

    tm_nxm.Branch('t0')
    tm_nxm.Branch('chisq')
    tm_nxm.Branch('E')

    event_count = 0

    while dr.LoadEvent(trigger='Trigger'):
        S = dr.GetTraces()
        dataS = np.sum(np.asarray(S), axis=0)

        # Baseline cuts on the summed trace: reject events whose pre-trigger
        # level exceeds the rest of the trace, or differs from the trace
        # tail by more than 5%.
        pre = np.mean(dataS[0:3000])
        tail = np.mean(dataS[30000:32000])
        if (pre > np.mean(dataS[3000:10000])
                and pre > np.mean(dataS[10000:15000]) and pre > tail):
            continue
        if pre > 1.05 * tail:
            continue
        if 1.05 * pre < tail:
            continue

        # After the conversion to amperes the numbers are too small for
        # find_peaks to work with, so rescale first.
        dataS = dataS * 1E7
        peaks, properties = find_peaks(dataS.transpose(),
                                       prominence=1,
                                       width=20)
        width_half = peak_widths(dataS.transpose(), peaks, rel_height=0.5)

        # Require exactly one peak whose width at half prominence does not
        # exceed 2000 bins.  BUGFIX: the original condition was
        # `any(width_half) > 2000`, which compares a bool to 2000 and is
        # always False, so the width cut never fired; the widths themselves
        # are the first element of the peak_widths() result tuple.
        if len(peaks) != 1 or np.any(width_half[0] > 2000):
            continue

        # Subtract the per-channel pre-trigger baseline.
        avg = np.mean(np.array(S)[:, 0:5000], axis=1)
        for i in range(len(S)):
            S[i] = S[i] - avg[i]

        result = man.ProcessEvent(S)

        event_count += 1

        if event_count % STEP_MONITOR == 0:
            print('Event', event_count)
            man.Draw(PATH + '/png', event_count)

        if isinstance(result, dict):
            tm_nxm['t0'] = result['t0']
            tm_nxm['chisq'] = result['chisq']
            tm_nxm['E'] = result['E']
        else:
            # Fit failed: flag the entry with sentinel values.
            tm_nxm['t0'] = -999999.0
            tm_nxm['chisq'] = -999999.0
            tm_nxm['E'] = -999999.0

        tm_nxm.Fill()

    dr.CloseFile()

    filepointer = TFile(filename_root, 'recreate')
    tm_nxm.Write()
    filepointer.Close()
Beispiel #5
0
from get_templates_nxm import get_templates_nxm
from OFManagerNxM import OFManagerNxM
from filter_wiener import filter_wiener

# NOTE(review): presumably an accumulator filled later in the script -
# TODO confirm against the remainder of the file.
integral = []

# path to directory containing data files
filepath = '/gpfs/slac/staas/fs1/g/supercdms//data/CDMS/SLAC/R56/Raw/09190602_1927'

# specifies series to be analyzed
# range(70, 71) selects a single file, F0070.
series = ["09190602_1927_F00" + str(i) + ".mid.gz" for i in range(70, 71)]

dr = DataReader()
dr.OpenFile(filepath, series, 0)

# Noise cross-PSD matrix, indexed [channel][channel][frequency bin].
V = get_noise_psds('noise_psds.gz')

while dr.LoadEvent(trigger='Trigger'):
    S = dr.GetTraces()
    noise_w = np.array([V[a][a] for a in range(len(S))])
    dataS = np.asarray(S)
    dataS = np.sum(dataS, axis=0)
    if (np.mean(dataS[0:3000]) > np.mean(dataS[3000:10000])
            and np.mean(dataS[0:3000]) > np.mean(dataS[10000:15000])
            and np.mean(dataS[0:3000]) > np.mean(dataS[30000:32000])):
        continue
    if (np.mean(dataS[0:3000]) > 1.05 * np.mean(dataS[30000:32000])): continue
    if (1.05 * np.mean(dataS[0:3000]) < np.mean(dataS[30000:32000])): continue
    dataS = dataS * 1E7
    peaks, properties = find_peaks(dataS.transpose(), prominence=1, width=20)
    width_half = peak_widths(dataS.transpose(), peaks, rel_height=0.5)