Example #1
import pandas as pd
import progressbar
from matplotlib import pyplot

def detect_anomalies_with_rolling_mean(ts, num_stds, window, verbose):
    """Detect outliers in the wheel RPM data by comparing points against [num_stds] standard deviations from a rolling mean.

       Inputs:
           ts [pd Series]:   A pandas Series with a DatetimeIndex and a column for RPM.
           num_stds [float]: The number of standard deviations away from the mean used to define point outliers.
           window [int]:     Window size; the number of samples to include in the rolling mean.
           verbose [bool]:   When True, a plot of the rolling mean will be displayed before outliers are detected.

       Optional Inputs:
           None

       Outputs:
           time_series_with_outliers [pd DataFrame]: A pandas DataFrame with a DatetimeIndex, and columns for RPM (real values) and Outlier (True or False).
           outliers [pd Series]: The detected outliers, as a pandas Series with a DatetimeIndex and a column for the outlier value.

       Optional Outputs:
           None

       Example:
           wheel_rpm_with_outliers, outliers = detect_anomalies_with_rolling_mean(time_series, 2, window, False)
    """

    if window <= 0:
        raise ValueError(
            '\'window\' must be given a value greater than 0 when using rolling mean.'
        )
    else:
        # Gather statistics in preparation for outlier detection
        rolling_mean = ts.rolling(window=window, center=False).mean()
        first_window_mean = ts.iloc[:window].mean()
        rolling_mean.iloc[:window] = first_window_mean  # fill first 'window' samples with the mean of those samples
        std = float(ts.values.std(ddof=0))
        X = ts.values
        outliers = pd.Series(dtype=float)
        time_series_with_outliers = pd.DataFrame({'RPM': ts})
        time_series_with_outliers['Outlier'] = False

        if verbose:
            pyplot.plot(ts, color='blue', label='Time Series')
            pyplot.plot(rolling_mean, color='black', label='Rolling Mean')
            pyplot.legend(loc='best')
            pyplot.title('Time Series & Rolling Mean')
            pyplot.show()

        # Start a progress bar
        widgets = [
            progressbar.Percentage(),
            progressbar.Bar(),
            progressbar.Timer(), ' ',
            progressbar.AdaptiveETA()
        ]
        progress_bar_sliding_window = progressbar.ProgressBar(
            widgets=[progressbar.FormatLabel('Wheel RPM Outliers ')] + widgets,
            max_value=int(len(X))).start()

        # Label outliers using standard deviation
        for t in range(len(X)):
            obs = X[t]
            y = rolling_mean.iloc[t]
            if abs(y - obs) > std * num_stds:
                time_series_with_outliers.at[ts.index[t], 'Outlier'] = True
                outlier = pd.Series(obs, index=[ts.index[t]])
                outliers = pd.concat([outliers, outlier])  # Series.append was removed in pandas 2.0
            progress_bar_sliding_window.update(t)  # advance progress bar

        return time_series_with_outliers, outliers
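
A minimal usage sketch for the function above, with synthetic wheel-RPM data (the Series construction and the injected outlier are illustrative):

import numpy as np
import pandas as pd

idx = pd.date_range('2018-01-01', periods=500, freq='min')
rpm = pd.Series(np.random.normal(100.0, 5.0, size=500), index=idx)
rpm.iloc[250] = 200.0  # inject one obvious outlier
labeled, found = detect_anomalies_with_rolling_mean(rpm, num_stds=2, window=30, verbose=False)
print(found)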
Example #2
                    valid = True

    train = selection[:args.n // 2]
    test = selection[args.n // 2:]

    print("Selected {} moves for training, {} for testing".format(
        len(train), len(test)))

    import shogi

    print("--------------- Caching Legal Moves ----------------")
    widgets = [
        ' [',
        progressbar.Timer(), '] ',
        progressbar.Bar(), ' (',
        progressbar.AdaptiveETA(), ') ',
        progressbar.Percentage()
    ]
    bar = progressbar.ProgressBar(maxval=len(selection), widgets=widgets)
    completed_games = 0
    total_positions = 0

    # Cache to store all of the legal moves for all the N sample positions
    legal_moves_cache = dict()
    for game in selection:
        pos, move = game[0], game[1]
        board = shogi.Board(pos)

        actions = []
        for move in board.legal_moves:
            # Make the move and record the str representation of the new position
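
The snippet above is cut off mid-loop; a plausible completion, assuming python-shogi's push/pop/sfen API (not the original author's continuation), would be:

for move in board.legal_moves:
    board.push(move)              # play the move on the board
    actions.append(board.sfen())  # record the resulting position as an SFEN string
    board.pop()                   # undo it before trying the next legal move
legal_moves_cache[pos] = actions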
Example #3
import progressbar

def makeProgressBar():
    return progressbar.ProgressBar(widgets=[
        progressbar.Bar(), ' ',
        progressbar.Percentage(), ' ',
        progressbar.AdaptiveETA()
    ])
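
Typical use of this factory, given that progressbar bars are callable on any sized iterable:

import time

for item in makeProgressBar()(range(50)):
    time.sleep(0.05)  # stand-in for real work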
Example #4
import numpy as np
import pandas as pd
import progressbar
import sunpy.instr.goes  # sunpy < 2.0 layout; this module later moved to sunkit-instruments
from astropy.time import Time
from datetime import timedelta
from sunpy.time import TimeRange

def detectStealthCMEsInSOHO(output_path='/Users/shawnpolson/Pictures/stealth_cmes_real_window/'):
    soho = pd.read_csv('/Users/shawnpolson/Documents/School/Spring 2018/Data Mining/StealthCMEs/savesets/Historical CME Data (SOHO).csv')
    print(soho.head())

    #goes_data = readsav('/Users/tyleralbee/Desktop/GoesEventsMegsAEra.sav')

    #1. Add new column to SOHO catalog named "Stealth?"
    sLength = len(soho['Time'])  # number of rows in the SOHO catalog
    soho['Stealth?'] = pd.Series(np.nan, index=soho.index)  # add the new column to the catalog

    csv_filename = output_path + 'stealth_soho_catalog_best_window_cont_{0}.csv'.format(Time.now().iso)
    soho.to_csv(csv_filename, header=True, index=False, mode='w')

    # Start a progress bar
    widgets = [progressbar.Percentage(), progressbar.Bar(), progressbar.Timer(), ' ', progressbar.AdaptiveETA()]

    progress_bar_sliding_window = progressbar.ProgressBar(
        widgets=[progressbar.FormatLabel('Stealth CME Search ')] + widgets,
        max_value=int(sLength)).start()

    #2. Set up loop through SOHO catalog
    for i in range(0, int(sLength)):
        #3. For each row, pull date and time from row
        soho_row = soho.iloc[i]
        date = soho_row['Date']
        time = soho_row['Time']
        timeObj = pd.to_datetime(date + 'T' + time)

        #4. Convert that date/time to a sunpy.time.TimeRange (will need to decide how wide of a time window)
            # From a CME researcher: "an event is likely correlated if the CME SOHO/LASCO start time is between 2 hours before
            # and 4 hours after the dimming start time and within 45 degrees of the dimming location
            # (I convert dimming central coordinate to PA and compare that with CME PA)”.
            # TODO: check angles
        time_range = TimeRange(timeObj - timedelta(hours=4), timeObj + timedelta(hours=2))  # the window above, inverted, because here we search relative to the CME start time
        #5. Send TimeRange to sunpy.instr.goes.get_goes_event_list(timerange, goes_class_filter=None)
        flares = sunpy.instr.goes.get_goes_event_list(time_range, goes_class_filter=None)
        #print(flares)

        #6. If that returns an empty list, add 'yes' to row['Stealth?']
        if len(flares) == 0:
            soho_row['Stealth?'] = 'yes'
            soho.iloc[i] = soho_row
        #7. Else, add 'no' to row['Stealth?']
        else:
            soho_row['Stealth?'] = 'no'
            soho.iloc[i] = soho_row

        csv_filename = output_path + 'stealth_soho_catalog_best_window_cont_{0}.csv'.format(Time.now().iso)
        soho.to_csv(csv_filename, header=True, index=False, mode='w')
        progress_bar_sliding_window.update(i)  # advance progress bar

    csv_filename = output_path + 'stealth_soho_catalog_best_window_cont_{0}.csv'.format(Time.now().iso)
    soho.to_csv(csv_filename, header=True, index=False, mode='w')
    print("Done!")
Example #5
    def scan(self):
        self.pulser_dac_parameters = self.scan_parameters.PlsrDAC
        self.colpr_addr_parameters = self.scan_parameters.Colpr_Addr

        description = np.dtype([
            ('colpr_addr', np.uint32), ('PlsrDAC', np.int32),
            ('voltage', np.float64)
        ])  # output data table description, native NumPy dtype
        data = self.raw_data_file.h5_file.create_table(
            self.raw_data_file.h5_file.root,
            name='plsr_dac_data',
            description=description,
            title='Data from PlsrDAC calibration scan')

        progress_bar = progressbar.ProgressBar(
            widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
            maxval=len(self.pulser_dac_parameters) *
            len(self.colpr_addr_parameters) * self.repeat_measurements,
            term_width=80)
        progress_bar.start()
        progress_bar_index = 0

        for colpr_address in self.colpr_addr_parameters:
            if self.abort_run.is_set():
                break
            self.set_scan_parameters(Colpr_Addr=colpr_address)

            commands = []
            commands.extend(self.register.get_commands("ConfMode"))
            self.register.set_global_register_value("Colpr_Addr",
                                                    colpr_address)
            commands.extend(
                self.register.get_commands("WrRegister", name="Colpr_Addr"))
            commands.extend(self.register.get_commands("RunMode"))
            self.register_utils.send_commands(commands)

            for pulser_dac in self.pulser_dac_parameters:
                if self.abort_run.is_set():
                    break
                self.set_scan_parameters(PlsrDAC=pulser_dac)
                commands = []
                commands.extend(self.register.get_commands("ConfMode"))
                self.register.set_global_register_value("PlsrDAC", pulser_dac)
                commands.extend(
                    self.register.get_commands("WrRegister", name="PlsrDAC"))
                commands.extend(self.register.get_commands("RunMode"))
                self.register_utils.send_commands(commands)

                actual_data = np.zeros(shape=(self.repeat_measurements, ),
                                       dtype=description)
                actual_data['colpr_addr'] = colpr_address
                actual_data["PlsrDAC"] = pulser_dac

                for index in range(self.repeat_measurements):  # avoid shadowing 'pulser_dac' from the outer loop
                    voltage_string = self.dut['Multimeter'].get_voltage()
                    voltage = float(voltage_string.split(',')[0])

                    actual_data['voltage'][index] = voltage
                    #                     logging.info('Measured %.2fV', voltage)
                    progress_bar_index += 1
                    progress_bar.update(progress_bar_index)
                # append data to HDF5 file
                data.append(actual_data)
        progress_bar.finish()
        data.flush()
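
A stripped-down sketch of the progress-bar pattern used here: one bar spans the nested loops, maxval is the product of the loop lengths, and a single counter is advanced in the innermost loop (names and sleep are illustrative; maxval is spelled max_value in newer progressbar2):

import time
import progressbar

addresses, dac_values, repeats = range(4), range(10), 5
bar = progressbar.ProgressBar(maxval=len(addresses) * len(dac_values) * repeats).start()
i = 0
for _ in addresses:
    for _ in dac_values:
        for _ in range(repeats):
            time.sleep(0.001)  # stand-in for a measurement
            i += 1
            bar.update(i)
bar.finish()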
Example #6
    def scan(self):
        # Output data structures
        data_array = np.zeros(shape=(len(self.scan_parameter_values),
                                     self.max_data_index - 1),
                              dtype=np.float16)
        data_out = self.raw_data_file.h5_file.create_carray(
            self.raw_data_file.h5_file.root,
            name='PlsrDACwaveforms',
            title='Waveforms from transient PlsrDAC calibration scan',
            atom=tb.Atom.from_dtype(data_array.dtype),
            shape=data_array.shape,
            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
        data_out.attrs.scan_parameter_values = self.scan_parameter_values
        data_out.attrs.dimensions = ['plsrdac', 'time', 'voltage']
        trigger_levels = []

        progress_bar = progressbar.ProgressBar(
            widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
            maxval=len(self.scan_parameter_values),
            term_width=80)
        progress_bar.start()

        for index, scan_parameter in enumerate(self.scan_parameter_values):
            # Update PlsrDAC parameter
            self.set_scan_parameters(**{'PlsrDAC':
                                        scan_parameter})  # tell run base
            self.set_scan_parameter('PlsrDAC', scan_parameter)  # set in FE
            # Get actual high level and set trigger level to the middle
            self.dut['Oscilloscope'].set_acquire_mode(
                'SAMPLE')  # also clears the averaging storage
            time.sleep(
                1.5
            )  # the Tektronix scope needs time to change mode and clear the averaging storage
            self.dut['Oscilloscope'].force_trigger()
            time.sleep(1.5)  # give the trigger some time
            raw_data = self.dut['Oscilloscope'].get_data(channel=self.channel)
            times, voltages = interpret_oscilloscope_data(raw_data)
            trigger_level = (np.amax(voltages) - np.amin(voltages)
                             ) / 2. + self.trigger_level_offset * 1e-3
            self.dut['Oscilloscope'].set_trigger_level(trigger_level)

            if self.show_debug_plots:
                plt.clf()
                plt.grid()
                plt.plot(times * 1e9, voltages * 1e3, label='Data')
                plt.plot(times * 1e9,
                         np.repeat([trigger_level * 1e3], len(times)),
                         '--',
                         label='Trigger (%d mV)' % (trigger_level * 1000))
                plt.xlabel('Time [ns]')
                plt.ylabel('Voltage [mV]')
                plt.legend(loc=0)
                plt.show()

            # Set up data acquisition and start the scan loop
            self.dut['Oscilloscope'].set_acquire_mode(
                'AVERAGE'
            )  # average to suppress noise while keeping the full bandwidth
            time.sleep(
                1.5)  # the Tektronix scope needs time to change mode
            super(PlsrDacTransientCalibration, self).scan()  # analog scan loop
            raw_data = self.dut['Oscilloscope'].get_data(channel=self.channel)
            times, voltages = interpret_oscilloscope_data(raw_data)
            data_array[index, :] = voltages[:self.max_data_index]
            trigger_levels.append(
                float(self.dut['Oscilloscope'].get_trigger_level()))
            progress_bar.update(index)

            if self.show_debug_plots:
                plt.clf()
                plt.ylim(0, 1500)
                plt.grid()
                plt.plot(times * 1e9, voltages * 1e3, label='Data')
                plt.plot(times * 1e9,
                         np.repeat([trigger_level * 1e3], len(times)),
                         '--',
                         label='Trigger (%d mV)' % (trigger_level * 1000))
                plt.xlabel('Time [ns]')
                plt.ylabel('Voltage [mV]')
                plt.legend(loc=0)
                plt.show()

        data_out[:] = data_array
        data_out.attrs.trigger_levels = trigger_levels
        data_out.attrs.times = times.tolist()
        progress_bar.finish()
Example #7
 def load_data(self, show_warnings, show_progressbar):
     self.all_errors = {}
     self.format_fields = {}
     m = Manager()
     q = m.Queue()
     q_bar = Manager().Queue()
     idxs = range(len(self.found_logs))
     if show_progressbar:
         result = ProgressPool([(process_files,
                                 "{}".format(self.found_logs[i]),
                                 [i, self.found_logs,
                                  self.log_files_format,
                                  self.directory,
                                  self.time_zones,
                                  self.positions,
                                  q,
                                  q_bar,
                                  self.additive_link,
                                  self.user_events,
                                  self.user_hosts,
                                  self.time_ranges,
                                  self.user_vms,
                                  self.vm_timeline,
                                  self.subtasks,
                                  self.needed_lines,
                                  self.real_line_num,
                                  [mes['flow_id']
                                   for l in self.vm_tasks.keys()
                                   for t in self.vm_tasks[l].keys()
                                   for mes in (self.vm_tasks)[l][t]
                                   if ('flow_id' in mes.keys()
                                       and mes['flow_id'] != '')],
                                  show_warnings])
                                for i in idxs], processes=4)
     else:
         result = []
         run_args = [[i, self.found_logs,
                      self.log_files_format,
                      self.directory,
                      self.time_zones,
                      self.positions,
                      q,
                      q_bar,
                      self.additive_link,
                      self.user_events,
                      self.user_hosts,
                      self.time_ranges,
                      self.user_vms,
                      self.vm_timeline,
                      self.subtasks,
                      self.needed_lines,
                      self.real_line_num,
                      [mes['flow_id'] for l in self.vm_tasks.keys()
                       for t in self.vm_tasks[l].keys()
                       for mes in (self.vm_tasks)[l][t] if ('flow_id'
                       in mes.keys() and mes['flow_id'] != '')],
                      show_warnings] for i in idxs]
         widget_style = ['Load: ', progressbar.Percentage(), ' (',
                         progressbar.SimpleProgress(), ')', ' ',
                         progressbar.Bar(), ' ', progressbar.Timer(), ' ',
                         progressbar.AdaptiveETA()]
         sum_lines = []
         for log in self.found_logs:
             sum_lines += [p[1] - p[0] for p in self.positions[log]]
         sum_lines = sum(sum_lines)
         bar = ProgressBar(widgets=widget_style, max_value=sum_lines)
         pos = 0
         with Pool(processes=4) as pool:
             worker = pool.imap(star, run_args)
             while True:
                 try:
                     try:
                         while True:
                             result += [worker.next(0)]
                     except multiprocessing.TimeoutError:
                         pass
                     while not q_bar.empty():
                         pos_tmp, name = q_bar.get()
                         pos += pos_tmp
                         bar.update(pos)
                 except StopIteration:
                     break
         bar.finish()
     for idx, log in enumerate(self.found_logs):
         self.all_errors[log] = result[idx][0]
         # saving logfile format fields names
         self.format_fields[log] = result[idx][1]
     while not q.empty():
         warn = q.get()
         self.out_descr.write(warn)
     if (self.all_errors == {} or all([self.all_errors[l] == []
                                       for l in self.all_errors.keys()])):
         self.out_descr.write('No matches.\n')
         exit()
Example #8
def create_hit_table(
    input_file_name,
    tdc_calibation_file,
    plsr_dac_calibation_file,
    n_sub_files=8
):  # loops over all root files and merges the data into a hdf5 file aligned at the event number
    print('Converting data from CERN ROOT TTree to hdf5 table')
    charge_calibration_values, tdc_calibration, tdc_error, tot_calibration, tot_error = get_charge_calibration(
        tdc_calibation_file, plsr_dac_calibation_file)

    # add all files that have the input_file_name prefix and load their data
    input_file_names = [
        input_file_name + '_t%d.root' % index for index in range(n_sub_files)
        if os.path.isfile(input_file_name + '_t%d.root' % index)
    ]
    n_files = len(input_file_names)
    input_files_root = [
        r.TFile(file_name, 'read') for file_name in input_file_names
    ]
    pixel_digits = [
        input_file_root.Get('EventData').Get('Pixel Digits')
        for input_file_root in input_files_root
    ]
    n_hits = [pixel_digit.GetEntries()
              for pixel_digit in pixel_digits]  # total pixel hits to analyze
    n_total_hits = sum(n_hits)

    with tb.open_file(input_file_name + '_interpreted.h5', 'w') as out_file_h5:
        hit_table = out_file_h5.create_table(
            out_file_h5.root,
            name='Hits',
            description=data_struct.HitInfoTable,
            title='hit_data',
            filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))

        # tmp data structures to be filled by ROOT
        data = {}
        for index, pixel_digit in enumerate(pixel_digits):
            column_data = {}
            for branch in pixel_digit.GetListOfBranches(
            ):  # loop over the branches
                column_data[branch.GetName()] = np.zeros(shape=1,
                                                         dtype=np.int32)
                branch.SetAddress(column_data[branch.GetName()].data)
            data[index] = column_data

        # result data structure to be filled in the following loop
        hits = np.zeros((n_total_hits, ),
                        dtype=tb.dtype_from_descr(data_struct.HitInfoTable))

        # get file index with lowest event number
        for pixel_digit in pixel_digits:
            pixel_digit.GetEntry(0)
        min_event_number = min(
            [data[index]['event'][0] for index in range(n_files)])
        actual_file_index = np.where(
            np.array([data[index]['event'][0]
                      for index in range(n_files)]) == min_event_number)[0][0]

        indices = [0] * n_files

        table_index = 0

        actual_data = data[actual_file_index]
        actual_event_number = actual_data['event'][0]
        last_valid_event_number = 0
        last_tdc = 0
        expected_event_number = actual_event_number
        indices[actual_file_index] = 1

        progress_bar = progressbar.ProgressBar(widgets=[
            '',
            progressbar.Percentage(), ' ',
            progressbar.Bar(marker='*', left='|', right='|'), ' ',
            progressbar.AdaptiveETA()
        ],
                                               maxval=n_total_hits,
                                               term_width=80)
        progress_bar.start()

        def add_actual_data(actual_data, table_index):
            if 0 <= actual_data['column'] < 80 and 0 <= actual_data['row'] < 336:
                tdc_interpolation = interp1d(
                    x=charge_calibration_values,
                    y=tdc_calibration[actual_data['column'],
                                      actual_data['row']],
                    kind='slinear',
                    bounds_error=False,
                    fill_value=0)
                tdc = tdc_interpolation(actual_data['charge'])
                tot_interpolation = interp1d(
                    x=charge_calibration_values,
                    y=tot_calibration[actual_data['column'],
                                      actual_data['row']],
                    kind='slinear',
                    bounds_error=False,
                    fill_value=0)
                tot = tot_interpolation(actual_data['charge'])

                if math.isnan(
                        tdc
                ):  # do not add hits where tdc is NaN; these pixels have a very high threshold or do not work
                    return table_index

                if tdc == 0 and actual_data[
                        'charge'] > 10000:  # no calibration for TDC due to high charge, thus mark as TDC overflow event
                    hits[table_index]['event_status'] |= 0b0000010000000000
                    tdc = 4095

                if tot == 0 and actual_data[
                        'charge'] > 10000:  # no calibration for TOT due to high charge, thus set max tot
                    tot = 13

                hits[table_index]['event_status'] |= 0b0000000100000000
                hits[table_index]['event_number'] = actual_data['event'][
                    0].astype(np.int64)
                hits[table_index]['column'] = (actual_data['column'] +
                                               1).astype(np.uint8)
                hits[table_index]['row'] = (actual_data['row'] + 1).astype(
                    np.uint16)
                hits[table_index]['TDC'] = int(actual_data['charge'] / 300.)
                hits[table_index]['tot'] = int(tot)

                table_index += 1
            return table_index

        while True:
            actual_event_number = actual_data['event'][0]
            if (actual_event_number == expected_event_number
                    or actual_event_number == expected_event_number -
                    1):  # check if event number increases
                actual_index, actual_digits, actual_data = indices[
                    actual_file_index], pixel_digits[actual_file_index], data[
                        actual_file_index]
                table_index = add_actual_data(actual_data, table_index)
            else:  # event number does not increase, thus the events are in another file --> switch file or the event number is missing
                file_event_numbers = [
                    data[file_index]['event'][0]
                    for file_index in range(n_files)
                ]  # all files actual event number
                actual_file_index = np.where(
                    file_event_numbers == min(file_event_numbers))[0][0]
                actual_index, actual_digits, actual_data = indices[
                    actual_file_index], pixel_digits[actual_file_index], data[
                        actual_file_index]
                actual_event_number = actual_data['event'][0]
                table_index = add_actual_data(actual_data, table_index)

            progress_bar.update(table_index)
            expected_event_number = actual_event_number + 1
            actual_digits.GetEntry(actual_index)

            if indices[actual_file_index] < n_hits[
                    actual_file_index]:  # simply stop when the first file is fully iterated
                indices[actual_file_index] += 1
            else:
                break

        # Set missing data and store to file
        hits[:table_index]['LVL1ID'] = hits[:table_index]['event_number'] % 255
        hits[:table_index]['BCID'] = hits[:table_index]['LVL1ID']
        hits[:table_index]['relative_BCID'] = 6
        hit_table.append(hits[:table_index])

        progress_bar.finish()

        for input_file_root in input_files_root:
            input_file_root.Close()
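
The while-loop above is a k-way merge of several event-number-sorted streams: it always advances the file whose current event number is smallest. With ordinary Python iterables the same idea collapses to heapq.merge (toy data for illustration):

import heapq

# (event_number, hit) tuples, each stream already sorted by event number
stream_a = [(1, 'hit a1'), (3, 'hit a2'), (4, 'hit a3')]
stream_b = [(2, 'hit b1'), (3, 'hit b2'), (5, 'hit b3')]
for event_number, hit in heapq.merge(stream_a, stream_b):
    print(event_number, hit)  # hits arrive in globally non-decreasing event order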
Example #9
def get_signal_planar_sensor(x_track, q_in, D, S, n_eff_0, is_ntype, is_oxygenated, V_bias, fluence, temperatur, t, dt, N):
    ''' Function to return the signal of a planar silicon sensor as a function of
        the time. The parameters are:
        x_track: offset from the electrode mean [um]
        q_in: deposited charge [e-h pairs]
        D: sensor thickness [um]
        S: electrode width [um]
        n_eff_0: doping concentration without fluence [10^12 Neq/cm^3]
        is_ntype: = 1 for n-type sensor
        is_oxygenated: = 1 for oxygenated sensor
        V_bias: bias of the sensor [V]
        fluence: radiation dose [10^12 Neq/cm^3]
        temperatur: temperature [K]
        t: time points of simulation [ns]
        dt: time step of simulation [ns] CHOOSE SMALL ENOUGH!
        N: number of quasi particles along the track
    '''

    # Constants
    Q_e = constants.e  # Electron charge [C]
    e_h = q_in  # deposited charge in electron hole pairs
    x_0 = x_track  # offset from the middle of the sensor pixel (x-direction) [um]

    n_eff = get_eff_acceptor_concentration(fluence, n_eff_0, is_ntype, is_oxygenated)

    if fluence:
        tr_e = get_trapping(fluence * 1e12, is_electron=True, paper=2)  # [ns]
        tr_h = get_trapping(fluence * 1e12, is_electron=False, paper=2)  # [ns]

    e_h = e_h / N

    # Start positions of the electrons
    if N == 1:
        y_e = np.atleast_1d(D / 2.)
    else:
        y_e = np.linspace(0, D, N)

    y_h = y_e.copy()  # start positions of the electron holes
    x_e = np.ones_like(y_h) * x_0  # Positions of the electrons in x
    x_h = x_e.copy()  # Positions of the electron holes in x
    Q_ind_e = np.zeros_like(y_e)  # start induced charge from electrons
    Q_ind_h = np.zeros_like(y_e)  # start induced charge from holes

    Q_ind_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    Q_ind_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    y_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    y_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    v_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    v_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    E_q_y_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    E_q_y_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    E_w_y_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    E_w_y_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    Phi_w_y_e_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))
    Phi_w_y_h_vec = np.zeros(shape=(y_e.shape[0], t.shape[0]))

    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(t), term_width=80)
    progress_bar.start()

    for index, i in enumerate(t):  # time loop
        # Electric and weighting fields at the charge carrier positions
        # Electrons:
        _, E_q_y_e = get_field(x=x_e,  # Electric field [V/um]
                               y=y_e,
                               V_bias=V_bias,
                               n_eff=n_eff,
                               D=D,
                               S=None,  # FIXME
                               is_planar=True)

        Phi_w_e = get_weighting_potential(x=x_e,
                                          y=y_e,
                                          D=D,
                                          S=S,
                                          is_planar=True)

        _, E_w_y_e = get_weighting_field(x=x_e,
                                         y=y_e,
                                         D=D,
                                         S=S,
                                         is_planar=True)

        # Holes
        _, E_q_y_h = get_field(x=x_h,  # Electric field [V/um]
                               y=y_h,
                               V_bias=V_bias,
                               n_eff=n_eff,
                               D=D,
                               S=None,  # FIXME
                               is_planar=True)

        Phi_w_h = get_weighting_potential(x=x_h,
                                          y=y_h,
                                          D=D,
                                          S=S,
                                          is_planar=True)

        _, E_w_y_h = get_weighting_field(x=x_h,
                                         y=y_h,
                                         D=D,
                                         S=S,
                                         is_planar=True)

        # Movement:

        # Electrons
        v_e = - E_q_y_e * get_mobility(E_q_y_e * 1e5,  # Velocity [um/ns]
                                       temperature=temperatur,
                                       is_electron=True)
        dy_e = v_e * dt
        y_e = y_e + dy_e
        # Boundaries
        e_in_boundary = np.logical_and(y_e <= D, y_e >= 0)
        E_w_y_e[~e_in_boundary] = 0
        E_q_y_e[~e_in_boundary] = 0
        v_e[~e_in_boundary] = 0
        y_e[~e_in_boundary] = 0  # clamp electrons that left the bulk (duplicated dead stores removed)

        # Holes
        v_h = E_q_y_h * get_mobility(E_q_y_h * 1e5,  # Velocity [um/ns]
                                     temperature=temperatur,
                                     is_electron=False)
        dy_h = v_h * dt
        y_h = y_h + dy_h
        # Boundaries
        h_in_boundary = np.logical_and(y_h <= D, y_h >= 0)
        E_w_y_h[~h_in_boundary] = 0
        E_q_y_h[~h_in_boundary] = 0
        v_h[~h_in_boundary] = 0
        y_h[~h_in_boundary] = D  # clamp holes that left the bulk (duplicated dead stores removed)

        # Induced charge calculation with trapping
        # electrons
        dQ_e = np.zeros_like(y_e)
        dQ_e[e_in_boundary] = e_h * E_w_y_e[e_in_boundary] * dy_e[e_in_boundary]
        if fluence:
            dQ_e[e_in_boundary] *= np.exp(-i / tr_e)

        # Holes
        dQ_h = np.zeros_like(y_h)
        dQ_h[h_in_boundary] = -e_h * E_w_y_h[h_in_boundary] * dy_h[h_in_boundary]
        if fluence:
            dQ_h[h_in_boundary] *= np.exp(-i / tr_h)

        # Sum up
        Q_ind_e += dQ_e
        Q_ind_h += dQ_h

        # Data for plotting
        Q_ind_e_vec[:, index] = Q_ind_e
        Q_ind_h_vec[:, index] = Q_ind_h
        y_e_vec[:, index] = y_e
        v_e_vec[:, index] = v_e
        y_h_vec[:, index] = y_h
        v_h_vec[:, index] = v_h
        E_q_y_e_vec[:, index] = E_q_y_e
        E_q_y_h_vec[:, index] = E_q_y_h
        E_w_y_e_vec[:, index] = E_w_y_e
        E_w_y_h_vec[:, index] = E_w_y_h
        Phi_w_y_e_vec[:, index] = Phi_w_e
        Phi_w_y_h_vec[:, index] = Phi_w_h
        
        progress_bar.update(index)
    progress_bar.finish()

    # Sum the induced charge of all quasi-particles along the track
    Q_tot_e = Q_ind_e_vec.sum(axis=0)
    Q_tot_h = Q_ind_h_vec.sum(axis=0)
    Q_tot = Q_tot_e + Q_tot_h
    
    return y_e_vec, y_h_vec, Q_tot, Q_tot_e, Q_tot_h
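
The dQ_e/dQ_h updates inside the loop are the Shockley-Ramo theorem in discrete form: a carrier of charge q that moves a step dy through the weighting field E_w induces dQ = q * E_w * dy on the readout electrode, attenuated by exp(-t/tau) when trapping is enabled. As a standalone sketch with illustrative numbers:

import math

q = 1.0            # carrier charge [e-h pairs per quasi-particle]
E_w_y = 0.02       # weighting field at the carrier position [1/um]
dy = 0.5           # drift step in this time interval [um]
t, tau = 2.0, 4.5  # elapsed time and trapping time [ns]

dQ = q * E_w_y * dy * math.exp(-t / tau)  # induced charge for this step
print(dQ)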
Example #10
def create_hitor_calibration(output_filename):
    logging.info('Analyze and plot results of %s', output_filename)

    def plot_calibration(col_row_combinations, scan_parameter, calibration_data, filename):  # Result calibration plot function
        for index, (column, row) in enumerate(col_row_combinations):
            logging.info("Plot calibration for pixel " + str(column) + '/' + str(row))
            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            fig.patch.set_facecolor('white')
            ax.grid(True)
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 0] * 25. + 25., yerr=[calibration_data[column - 1, row - 1, :, 2] * 25, calibration_data[column - 1, row - 1, :, 2] * 25], fmt='o', label='FE-I4 ToT [ns]')
            ax.errorbar(scan_parameter, calibration_data[column - 1, row - 1, :, 1] * 1.5625, yerr=[calibration_data[column - 1, row - 1, :, 3] * 1.5625, calibration_data[column - 1, row - 1, :, 3] * 1.5625], fmt='o', label='TDC ToT [ns]')
            ax.set_title('Calibration for pixel ' + str(column) + '/' + str(row))
            ax.set_xlabel('Charge [PlsrDAC]')
            ax.set_ylabel('TOT')
            ax.legend(loc=0)
            filename.savefig(fig)
            if index > 100:  # stop for too many plots
                logging.info('Do not create pixel plots for more than 100 pixels to save time')
                break

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True) as analyze_raw_data:  # Interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters for in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # store number of injections for later cross check

    with tb.open_file(output_filename + '_interpreted.h5', 'r') as in_file_h5:  # Get scan parameters from interpreted file
        meta_data = in_file_h5.root.meta_data[:]
        hits = in_file_h5.root.Hits[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(reversed(scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        col_row_combinations = get_unique_scan_parameter_combinations(in_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)

        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(meta_data, scan_parameters=scan_parameter_names)
        parameter_values = get_scan_parameters_table_from_meta_data(meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack((parameter_values, event_number_ranges))
        event_numbers = hits['event_number'].copy()  # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

        with tb.open_file(output_filename + "_calibration.h5", mode="w") as calibration_data_file:
            logging.info('Create calibration')
            output_pdf = PdfPages(output_filename + "_calibration.pdf")
            calibration_data = np.zeros(shape=(80, 336, len(inner_loop_parameter_values), 4), dtype='f4')  # histogram over column, row and PlsrDAC value; last axis: mean discrete tot, mean tot from TDC, rms discrete tot, rms tot from TDC

            progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(event_ranges_per_parameter), term_width=80)
            progress_bar.start()

            for index, (parameter_values, event_start, event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number']
                array_index = np.searchsorted(event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                actual_col, actual_row, parameter_value = parameter_values

                if len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]):
                    logging.warning('There are %d hits from not selected pixels in the data', len(actual_hits[np.logical_and(actual_hits['column'] != actual_col, actual_hits['row'] != actual_row)]))

                actual_hits = actual_hits[np.logical_and(actual_hits['column'] == actual_col, actual_hits['row'] == actual_row)]
                actual_tdc_hits = actual_hits[(actual_hits['event_status'] & 0b0000111110011100) == 0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                actual_tot_hits = actual_hits[(actual_hits['event_status'] & 0b0000100010011100) == 0b0000000000000000]  # only take hits from good events for tot
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']

                if tdc.shape[0] != n_injections and index == event_ranges_per_parameter.shape[0] - 1:
                    logging.warning('There are %d != %d TDC hits for %s = %s', tdc.shape[0], n_injections, str(scan_parameter_names), str(parameter_values))

                inner_loop_scan_parameter_index = np.where(parameter_value == inner_loop_parameter_values)[0][0]  # translate the scan parameter value to an index for the result histogram
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1, inner_loop_scan_parameter_index, 3] = np.std(tdc)

                progress_bar.update(index)

            calibration_data_out = calibration_data_file.create_carray(calibration_data_file.root, name='HitOrCalibration', title='Hit OR calibration data', atom=tb.Atom.from_dtype(calibration_data.dtype), shape=calibration_data.shape, filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            plot_calibration(col_row_combinations, scan_parameter=inner_loop_parameter_values, calibration_data=calibration_data, filename=output_pdf)
            output_pdf.close()
            progress_bar.finish()
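
The np.searchsorted call above is the workhorse of the per-parameter slicing: because event_number is sorted (and copied into a contiguous array), the hit range for an event interval is found by binary search in O(log n):

import numpy as np

event_numbers = np.array([0, 0, 1, 2, 2, 2, 5, 7])  # sorted, contiguous copy
start, stop = np.searchsorted(event_numbers, np.array([2, 5]))
print(event_numbers[start:stop])  # -> [2 2 2], all hits with 2 <= event_number < 5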
Example #11
def WeatherGraphs(GraphPath, Weather_Data):
    """Creates monthly weather graphs
    
    Creates monthly graphs showing the weather patterns
    
    Parameters
    ----------
    GraphPath : str
        Path to save the graphs.
    
    Weather_Data : array-like
        The weather data to graph.
    """
    majorTick = mdates.DayLocator(interval=7)  # every week
    majorFmt = mdates.DateFormatter('%d/%m')
    minorTick = mdates.DayLocator()  # every day
    if not os.path.isdir(GraphPath + 'Monthly/'):
        print('Monthly folder not found, creating it')
        os.makedirs(GraphPath + 'Monthly')
    if not os.path.isfile(GraphPath + 'Monthly/Weather2018-3.png'):
        widgets = [
            'Creating monthly weather graph: ',
            progressbar.Percentage(), ' ',
            progressbar.Bar(marker='∎', left='[', right=']'), ' ',
            progressbar.AdaptiveETA()
        ]
        pbar = progressbar.ProgressBar(widgets=widgets, maxval=60)
        pbar.start()
        month = 3
        year = 3
        count = 0
        done = False
        #print('Creating Monthly weather graph [%]')
        while not done:
            pbar.update(count)
            count = count + 1
            #if count % 6 == 0:
            #    print('\u220E', end='', flush=True)
            StartPoint = Weather_Data.index.get_loc(
                '201' + str(year) + '-' + str(month) + '-' + '01 00:00',
                method='nearest')
            EndPoint = Weather_Data['201' + str(year) + '-' +
                                    str(month)].shape[0]
            fig, ax = plt.subplots()
            ax.plot(Weather_Data.index[StartPoint:StartPoint + EndPoint],
                    Weather_Data.W_speed_10['201' + str(year) + '-' +
                                            str(month)],
                    color=Color2)
            # format the ticks
            ax.xaxis.set_major_locator(majorTick)
            ax.xaxis.set_major_formatter(majorFmt)
            ax.xaxis.set_minor_locator(minorTick)
            datemin = np.datetime64(Weather_Data.index[StartPoint],
                                    'm')  # round to the nearest minute
            datemax = np.datetime64(
                Weather_Data.index[StartPoint + EndPoint - 1], 'm')
            ax.set_xlim(datemin, datemax)
            ax.grid(which='major', alpha=1, color='black')
            ax.grid(which='minor', alpha=0.05, color='black')
            fig.autofmt_xdate()
            plt.title('201' + str(year) + ' - ' + monthDict[month])
            plt.ylim([0, 110])
            plt.xlabel('Time [date]')
            plt.ylabel('Wind Speed [km/h]')
            plt.savefig(GraphPath + 'Monthly/' + 'Weather201' + str(year) +
                        '-' + str(month) + '.png')
            plt.close(fig)
            if month == 12:
                month = 1
                year = year + 1
            else:
                month = month + 1
            if year == 8 and month == 4:
                done = True
                #print('')
                pbar.finish()
Example #12
        "[ERR] Some modules failed to import. Install all the program's requirements with the following command:\npip install --upgrade -r requirements.txt"
    )
subprocess.call("cls" if os.name == "nt" else "clear", shell=True)

print(Style.NORMAL + Fore.RESET)
if os.name == "nt":
    print(Fore.BLUE + "Checking for / downloading FFMPEG binaries...")
    ff = ["ffmpeg.exe", "ffplay.exe", "ffprobe.exe"]
    if any([not os.path.exists(x) for x in ff]):
        print(Fore.GREEN)
        widgets = [
            "Downloading FFmpeg: ",
            progressbar.Bar(), " ",
            progressbar.Percentage(),
            progressbar.Timer(" (%(elapsed)s elapsed, "),
            progressbar.AdaptiveETA(), " remaining)"
        ]
        if platform.uname()[4].lower() == "amd64":  # 64-bit architecture
            arc = "64"
            mxc = 8000  # Chunks estimate
        else:  # 32-bit architecture
            arc = "32"
            mxc = np.ceil(55159009 / 8096)  # Chunks estimate

        url = "https://ffmpeg.zeranoe.com/builds/win{0}/static/ffmpeg-4.1-win{0}-static.zip".format(
            arc)
        fn = url.split("/")[-1]
        res = requests.get(url, stream=True)
        pb = progressbar.ProgressBar(widgets=widgets, max_value=mxc)
        pb.start()
        with open(fn, "wb") as fout:
def correlationCoefficientScan(
        output_path='/Users/tyleralbee/Desktop/StealthCME',
        eve_data_path='/Users/tyleralbee/Desktop/savesets/eve_selected_lines.csv',
        cme_signature='/Users/tyleralbee/Desktop/savesets/eve_lines_event_percents_fitted.csv',
        verbose=True):

    eve_lines = pd.read_csv(eve_data_path, index_col=0)
    eve_lines.index = pd.to_datetime(eve_lines.index)
    wholeDfLength = len(eve_lines)

    cme_event = pd.read_csv(cme_signature, index_col=0)
    cme_event.index = pd.to_datetime(cme_event.index)
    cmeEventLength = len(cme_event)

    if verbose:
        logger = JpmLogger(filename='do_correlation_coefficient_scan',
                           path=output_path,
                           console=True)
        logger.info("Starting Stealth CME search pipeline!")
    else:
        logger = None

    if verbose:
        logger.info('Loaded EVE and CME data')

    # Define the columns of the output catalog
    output_table = pd.DataFrame(columns=[
        'Event #', 'Start Time', 'End Time', 'Correlation Coefficient'
    ])
    csv_filename = output_path + 'cc_output_{0}.csv'.format(Time.now().iso)
    output_table.to_csv(csv_filename, header=True, index=False, mode='w')

    if verbose:
        logger.info('Created output table definition.')

    # Start a progress bar
    widgets = [
        progressbar.Percentage(),
        progressbar.Bar(),
        progressbar.Timer(), ' ',
        progressbar.AdaptiveETA()
    ]

    startRow = 0
    endRow = cmeEventLength
    numSlices = int(wholeDfLength / cmeEventLength)
    output_row = 1

    progress_bar_sliding_window = progressbar.ProgressBar(
        widgets=[progressbar.FormatLabel('Correlation Coefficient Analysis ')] + widgets,
        max_value=numSlices).start()

    # ----------Loop through data set using a sliding time window-------------------------------------------------------

    for i in range(1, numSlices):

        # ----------Clip dataset to time slice window-------------------------------------------------------------------

        event_time_slice = eve_lines.iloc[startRow:endRow]

        # ---------Convert irradiance values to percentages-------------------------------------------------------------

        preflare_irradiance = event_time_slice.iloc[0]
        event_time_slice_percentages = (event_time_slice - preflare_irradiance
                                        ) / preflare_irradiance * 100.0

        if verbose:
            logger.info(
                "Event {0} irradiance converted from absolute to percent units."
                .format(i))

        # ---------Fit light curves to reduce noise---------------------------------------------------------------------

        uncertainty = np.ones(len(event_time_slice_percentages)
                              ) * 0.002545  # got this line from James's code

        progress_bar_fitting = progressbar.ProgressBar(
            widgets=[progressbar.FormatLabel('Light curve fitting: ')] +
            widgets,
            max_value=len(event_time_slice_percentages.columns)).start()

        for j, column in enumerate(event_time_slice_percentages):
            if event_time_slice_percentages[column].isnull().all().all():
                if verbose:
                    logger.info(
                        'Event {0} {1} fitting skipped because all irradiances are NaN.'
                        .format(j, column))
            else:
                eve_line_event_percentages = pd.DataFrame(
                    event_time_slice_percentages[column])
                eve_line_event_percentages.columns = ['irradiance']
                eve_line_event_percentages['uncertainty'] = uncertainty

                fitting_path = output_path + 'Fitting/'
                if not os.path.exists(fitting_path):
                    os.makedirs(fitting_path)

                plt.close('all')
                light_curve_fit, best_fit_gamma, best_fit_score = automatic_fit_light_curve(
                    eve_line_event_percentages,
                    plots_save_path='{0} Event {1} {2} '.format(
                        fitting_path, j, column),
                    verbose=verbose,
                    logger=logger)
                event_time_slice_percentages[column] = light_curve_fit
                event_time_slice_fitted = event_time_slice_percentages  # Keep our variable names explicit

                if verbose:
                    logger.info('Event {0} {1} light curves fitted.'.format(
                        j, column))
                progress_bar_fitting.update(j)

        progress_bar_fitting.finish()

        if verbose:
            logger.info("Event {0} Light curves fitted".format(i))

        # ---------Compute Correlation Coefficients---------------------------------------------------------------------

        totalCorrelationCoefficient = 0.0
        ds1 = event_time_slice_fitted
        ds2 = cme_event

        # Gather stats for correlation
        for k, column in enumerate(ds1):
            dsColumn1 = ds1[column]
            dsColumn2 = ds2[column]

            dsColumn1.reset_index(
                drop=True, inplace=True)  # prevent NaNs from appearing in join
            dsColumn2.reset_index(
                drop=True, inplace=True)  # prevent NaNs from appearing in join

            # TODO: assert that both columns have same count?
            n = int(dsColumn1.count())
            meanA = float(dsColumn1.mean())
            meanB = float(dsColumn2.mean())
            stdA = float(dsColumn1.std(ddof=0))
            stdB = float(dsColumn2.std(ddof=0))

            # Generate correlation output
            dsJoined = pd.DataFrame({
                'a': dsColumn1,
                'b': dsColumn2
            })  # Avoids ambiguity when attr names are the same
            numerator = 0.0  # Stores summation of (a_i - meanA)(b_i - meanB)
            denominator = n * stdA * stdB

            for index, row in dsJoined.iterrows():
                a = row['a']
                b = row['b']
                numerator = numerator + (a - meanA) * (b - meanB)

            correlationCoefficient = numerator / denominator
            totalCorrelationCoefficient = totalCorrelationCoefficient + correlationCoefficient

        # ---------Output Results---------------------------------------------------------------------------------------

        eventStartTime = event_time_slice.iloc[0].name
        eventEndTime = event_time_slice.iloc[-1].name

        if not math.isnan(totalCorrelationCoefficient
                          ) and totalCorrelationCoefficient >= 4.2:
            output_table.loc[output_row] = [
                output_row, eventStartTime, eventEndTime,
                totalCorrelationCoefficient
            ]
            csv_filename = output_path + 'cc_output_{0}.csv'.format(
                Time.now().iso)
            output_table.to_csv(csv_filename,
                                header=True,
                                index=False,
                                mode='w')
            output_row = output_row + 1

        startRow = startRow + 60  # advance time window by 1 hour
        endRow = endRow + 60  # advance time window by 1 hour
        progress_bar_sliding_window.update(i)  # advance progress bar
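
For reference, the hand-rolled summation above computes the Pearson correlation coefficient per emission line; with the indices already aligned, pandas gives the same number directly (the population vs. sample standard-deviation conventions cancel in r):

import pandas as pd

a = pd.Series([1.0, 2.0, 3.0, 4.0])
b = pd.Series([1.1, 1.9, 3.2, 3.9])
print(a.corr(b))  # Pearson r, identical to the explicit loop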
Example #14
def create_threshold_calibration(scan_base_file_name, create_plots=True):  # Create calibration function, can be called stand alone
    def analyze_raw_data_file(file_name):
        if os.path.isfile(os.path.splitext(file_name)[0] + '_interpreted.h5'):  # skip analysis if already done
            logging.warning('Analyzed data file ' + file_name + ' already exists. Skip analysis for this file.')
        else:
            with AnalyzeRawData(raw_data_file=file_name, create_pdf=False) as analyze_raw_data:
                analyze_raw_data.create_tot_hist = False
                analyze_raw_data.create_tot_pixel_hist = False
                analyze_raw_data.create_fitted_threshold_hists = True
                analyze_raw_data.create_threshold_mask = True
                analyze_raw_data.interpreter.set_warning_output(False)  # RX errors would fill the console
                analyze_raw_data.interpret_word_table()

    def store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_values):
        logging.info("Storing calibration data in a table...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_table = out_file_h5.create_table(out_file_h5.root, name='MeanThresholdCalibration', description=data_struct.MeanThresholdCalibrationTable, title='mean_threshold_calibration', filters=filter_table)
        threshold_calib_table = out_file_h5.create_table(out_file_h5.root, name='ThresholdCalibration', description=data_struct.ThresholdCalibrationTable, title='threshold_calibration', filters=filter_table)
        for column in range(80):
            for row in range(336):
                for parameter_value_index, parameter_value in enumerate(parameter_values):
                    threshold_calib_table.row['column'] = column
                    threshold_calib_table.row['row'] = row
                    threshold_calib_table.row['parameter_value'] = parameter_value
                    threshold_calib_table.row['threshold'] = threshold_calibration[column, row, parameter_value_index]
                    threshold_calib_table.row.append()
        for parameter_value_index, parameter_value in enumerate(parameter_values):
            mean_threshold_calib_table.row['parameter_value'] = parameter_value
            mean_threshold_calib_table.row['mean_threshold'] = mean_threshold_calibration[parameter_value_index]
            mean_threshold_calib_table.row['threshold_rms'] = mean_threshold_rms_calibration[parameter_value_index]
            mean_threshold_calib_table.row.append()
        threshold_calib_table.flush()
        mean_threshold_calib_table.flush()
        logging.info("done")

    def store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, parameter_name, parameter_values):
        logging.info("Storing calibration data in an array...")
        filter_table = tb.Filters(complib='blosc', complevel=5, fletcher32=False)
        mean_threshold_calib_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdMeanCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_calibration', filters=filter_table)
        mean_threshold_calib_rms_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdRMSCalibration', atom=tb.Atom.from_dtype(mean_threshold_calibration.dtype), shape=mean_threshold_calibration.shape, title='mean_threshold_rms_calibration', filters=filter_table)
        threshold_calib_array = out_file_h5.create_carray(out_file_h5.root, name='HistThresholdCalibration', atom=tb.Atom.from_dtype(threshold_calibration.dtype), shape=threshold_calibration.shape, title='threshold_calibration', filters=filter_table)
        mean_threshold_calib_array[:] = mean_threshold_calibration
        mean_threshold_calib_rms_array[:] = mean_threshold_rms_calibration
        threshold_calib_array[:] = threshold_calibration
        mean_threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_rms_array.attrs.dimensions = ['column', 'row', parameter_name]
        threshold_calib_array.attrs.dimensions = ['column', 'row', parameter_name]
        mean_threshold_calib_array.attrs.scan_parameter_values = parameter_values
        mean_threshold_calib_rms_array.attrs.scan_parameter_values = parameter_values
        threshold_calib_array.attrs.scan_parameter_values = parameter_values

        logging.info("done")

    def mask_columns(pixel_array, ignore_columns):
        idx = np.array(ignore_columns) - 1  # from FE to Array columns
        m = np.zeros_like(pixel_array)
        m[:, idx] = 1
        return np.ma.masked_array(pixel_array, m)
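
    # Hedged illustration (editor's note, not original code): with
    # ignore_columns = (1, 78, 79, 80) the mask covers array columns
    # 0, 77, 78 and 79, since FE columns count from 1 while array
    # columns count from 0.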

    raw_data_files = analysis_utils.get_data_file_names_from_scan_base(scan_base_file_name)
    first_scan_base_file_name = scan_base_file_name if isinstance(scan_base_file_name, basestring) else scan_base_file_name[0]  # multiple scan_base_file_names for multiple runs

    with tb.open_file(first_scan_base_file_name + '.h5', mode="r") as in_file_h5:  # deduce scan parameters from the first (and often only) scan base file name
        ignore_columns = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'ignore_columns')]['value'][0]
        parameter_name = in_file_h5.root.configuration.run_conf[:][np.where(in_file_h5.root.configuration.run_conf[:]['name'] == 'scan_parameters')]['value'][0]
        ignore_columns = ast.literal_eval(ignore_columns)
        parameter_name = ast.literal_eval(parameter_name)[1][0]

    calibration_file = first_scan_base_file_name + '_calibration'

    for raw_data_file in raw_data_files:  # analyze each raw data file; no multithreading here, it is already used in the S-curve fit
        analyze_raw_data_file(raw_data_file)

    files_per_parameter = analysis_utils.get_parameter_value_from_file_names([os.path.splitext(file_name)[0] + '_interpreted.h5' for file_name in raw_data_files], parameter_name, unique=True, sort=True)

    logging.info("Create calibration from data")
    mean_threshold_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    mean_threshold_rms_calibration = np.empty(shape=(len(raw_data_files),), dtype='<f8')
    threshold_calibration = np.empty(shape=(80, 336, len(raw_data_files)), dtype='<f8')

    if create_plots:
        logging.info('Saving calibration plots in: %s', calibration_file + '.pdf')
        output_pdf = PdfPages(calibration_file + '.pdf')

    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(files_per_parameter.items()), term_width=80)
    progress_bar.start()
    parameter_values = []
    for index, (analyzed_data_file, parameters) in enumerate(files_per_parameter.items()):
        parameter_values.append(parameters.values()[0][0])
        with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)  # mask the not scanned columns for analysis and plotting
            thresholds_masked = mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns)
            if create_plots:
                plot_three_way(hist=thresholds_masked, title='Threshold Fitted for ' + parameters.keys()[0] + ' = ' + str(parameters.values()[0][0]), filename=output_pdf)
                plsr_dacs = analysis_utils.get_scan_parameter(meta_data_array=in_file_h5.root.meta_data[:])['PlsrDAC']
                plot_scurves(occupancy_hist=occupancy_masked, scan_parameters=plsr_dacs, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, index] = thresholds_masked.T
        progress_bar.update(index)
    progress_bar.finish()

    with tb.open_file(calibration_file + '.h5', mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_name=parameter_name, parameter_values=parameter_values)
        store_calibration_data_as_table(out_file_h5=out_file_h5, mean_threshold_calibration=mean_threshold_calibration, mean_threshold_rms_calibration=mean_threshold_rms_calibration, threshold_calibration=threshold_calibration, parameter_values=parameter_values)

    if create_plots:
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=False, filename=output_pdf)
        plot_scatter(x=parameter_values, y=mean_threshold_calibration, title='Threshold calibration', x_label=parameter_name, y_label='Mean threshold', log_x=True, filename=output_pdf)
        output_pdf.close()
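
# Minimal read-back sketch (editor's addition, not part of the scan code): the
# calibration file written above can be reopened with PyTables to recover the
# threshold histogram and its scan parameter values. Node names follow the
# code above; the file name is a placeholder.
import tables as tb

with tb.open_file('scan_calibration.h5', mode='r') as calib_file_h5:
    hist = calib_file_h5.root.HistThresholdCalibration[:]  # shape (80, 336, n_parameters)
    parameter_values = calib_file_h5.root.HistThresholdCalibration.attrs.scan_parameter_values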
Exemplo n.º 15
0
def create_progressbar_reader(reader, max_reads=None, mag_format=None):
    """Wrap an iterable in a ProgressBar.
    
    Args:
        max_reads: Max number of items, if known in advance.
        mag_format: Function that formats an integer as a string with magnitude
            (e.g. 1000000 => 1M).
    """
    import progressbar
    import progressbar.widgets

    class ProgressBarReader(progressbar.ProgressBar):
        """Extension of ProgressBar that supports starting and stopping the
        BatchReader.
        """
        def __init__(self, iterable, widgets, max_value=None):
            super(ProgressBarReader,
                  self).__init__(widgets=widgets,
                                 max_value=max_value
                                 or progressbar.UnknownLength)
            self._iterable = iterable
            self.done = False

        def __next__(self):
            try:
                value = next(self._iterable)
                if self.start_time is None:
                    self.start()
                self.update(self.value + value[0]["size"])
                return value
            except StopIteration:
                self.close()
                raise

        def close(self):
            """Finish the progress bar and close the underlying iterator.
            """
            if not self.done:
                self.finish()
                self.done = True
            try:
                self._iterable.close()
            except Exception:  # best-effort close; the iterable may not support it
                pass

    class MagCounter(progressbar.widgets.WidgetBase):
        """Custom widget that formats the value using a specified magnitude.
        """
        def __init__(self, mag_format):
            super().__init__()
            self._format = mag_format

        def __call__(self, progress, data):
            return self._format(data["value"])

    if max_reads:
        reader = ProgressBarReader(reader, [
            MagCounter(mag_format), " Reads (",
            progressbar.Percentage(), ") ",
            progressbar.Timer(), " ",
            progressbar.Bar(),
            progressbar.AdaptiveETA()
        ], max_reads)
    else:
        reader = ProgressBarReader(reader, [
            MagCounter(mag_format), " Reads",
            progressbar.Timer(),
            progressbar.AnimatedMarker()
        ])

    return reader
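
# Hedged usage sketch (editor's addition, not from the original source): wrap
# a toy batch iterator whose items match the value[0]["size"] access in
# __next__ above, i.e. each item is a sequence of dicts with a 'size' key.
def _toy_batches(n_batches=100, batch_size=1000):
    for _ in range(n_batches):
        yield [{"size": batch_size}]

reader = create_progressbar_reader(_toy_batches(), max_reads=100 * 1000,
                                   mag_format=str)
while True:
    try:
        next(reader)  # advances the bar by the batch size
    except StopIteration:
        break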
Exemplo n.º 16
0
def _solve_dd(p_e_0, p_h_0, q0, n_steps, dt, geom_descr, pot_w_descr,
              pot_descr, temp, diffusion, t_e_trapping, t_h_trapping, t_e_t1,
              t_h_t1, t_r, save_frac):
    p_e, p_h = p_e_0, p_h_0

    # Result arrays initialized to NaN
    n_store = int(n_steps / save_frac)  # Steps to store

    max_step_size = n_steps / n_store * 10
    # Different store time step for each e-h pair
    T = np.full(shape=(n_store, p_e.shape[1]),
                fill_value=np.nan,
                dtype=np.float32)
    # Stored trajectory for each eh pair
    traj_e = np.full(shape=(n_store, p_e.shape[0], p_e.shape[1]),
                     fill_value=np.nan)
    traj_h = np.full(shape=(n_store, p_h.shape[0], p_h.shape[1]),
                     fill_value=np.nan)
    # Stored induced charge for each eh pair
    I_ind_e = np.zeros(shape=(n_store, p_e.shape[1]))
    I_ind_h = np.zeros_like(I_ind_e)
    # Helper array(s) of actual step index and next step index
    i_step = np.zeros(p_e.shape[1], dtype=int)  # Result array indices
    next_step = np.zeros_like(i_step)  # Next time step to store
    # Summed induced charge/current with every time step
    I_ind_tot = np.zeros(shape=(n_steps))
    # Total induced charge
    Q_ind_tot_e = np.zeros(shape=(p_e.shape[1]))
    Q_ind_tot_h = np.zeros(shape=(p_h.shape[1]))

    def add_diffusion(v_e, v_h):
        # Calculate absolute thermal velocity
        v_th_e = silicon.get_thermal_velocity(temperature=temp,
                                              is_electron=True)
        v_th_h = silicon.get_thermal_velocity(temperature=temp,
                                              is_electron=False)
        # Create thermal velocity distribution
        # From: The Atomistic Simulation of Thermal Diffusion
        # and Coulomb Drift in Semiconductor Detectors
        # IEEE VOL. 56, NO. 3, JUNE 2009
        v_th_e *= np.sqrt(
            2. / 3. *
            np.log(np.abs(1. / (1. - np.random.uniform(size=v_e.shape[1])))))
        v_th_h *= np.sqrt(
            2. / 3. *
            np.log(np.abs(1. / (1. - np.random.uniform(size=v_h.shape[1])))))
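        # Editor's note: sqrt(2/3 * ln(1/(1 - u))) with u ~ U(0, 1) is
        # inverse-transform sampling; -ln(1 - u) is Exp(1) distributed, so the
        # sampled speed is Rayleigh distributed with mean square 2/3 * v_th**2,
        # i.e. the two in-plane degrees of freedom of the 3-D thermal motion.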
        # Calculate random direction in x, y
        # Uniform random number 0 .. 2 Pi
        eta = np.random.uniform(0., 2. * np.pi, size=v_e.shape[1])
        direction_e = np.array([np.cos(eta), np.sin(eta)])
        eta = np.random.uniform(0., 2. * np.pi, size=v_h.shape[1])
        direction_h = np.array([np.cos(eta), np.sin(eta)])

        v_th_e = v_th_e[np.newaxis, :] * direction_e
        v_th_h = v_th_h[np.newaxis, :] * direction_h

        v_e += v_th_e
        v_h += v_th_h

        return v_e, v_h

    def cal_step_size(t, Q_ind_tot, dt, dydt, q_max, i_step, n_store):
        ''' Calculates the step size from the current data.

        It is assumed that the current slope stays constant.
        The remaining time to cover is estimated and the step size is
        adjusted to fit the remaining storage steps.
        '''
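
        # Worked example (editor's addition): with dt = 1, t = 10,
        # Q_ind_tot = 0.5, dydt = 0.05 and q_max = 1.0, the charge is
        # extrapolated to reach q_max at t_max = (1.0 - 0.5) / 0.05 + 10 = 20,
        # so t_left = 10; with n_store - i_step - 1 = 5 stores left, the step
        # size becomes 10 / 1 / 5 = 2 simulation steps per store.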

        step_size = np.zeros_like(q_max)

        # All steps are used, mark as done
        sel_done = n_store - i_step - 1 <= 0

        # All storage spaces used up
        if step_size[~sel_done].size == 0:
            return step_size.astype(int)

        # Set next step to NaN if storing is done
        step_size[sel_done] = np.nan

        # Calculate remaining x distance to cover
        try:
            # Case: increasing function (assume value = q_max at tmax)
            t_max_exp = ((q_max - Q_ind_tot) / dydt + t)[~sel_done]
            # Case: decreasing function (assume value = 0 at tmax)
            t_max_exp[dydt[~sel_done] < 0] = (
                (-q_max - Q_ind_tot) / dydt +
                t)[~sel_done][dydt[~sel_done] < 0]
            # Correct expected time larger than simulation time
            t_max_exp[t_max_exp > n_steps * dt] = n_steps * dt
            # Remaining time to cover
            t_left = t_max_exp - t
        except (IndexError, ValueError):
            logging.error('q_max.shape %s', str(q_max.shape))
            logging.error('sel_done.shape %s', str(sel_done.shape))
            logging.error('Q_ind_tot.shape %s', str(Q_ind_tot.shape))
            logging.error('dydt.shape %s', str(dydt.shape))
            logging.error('t_max_exp.shape %s', str(t_max_exp.shape))
            logging.error('dydt[~sel_done].shape %s',
                          str(dydt[~sel_done].shape))
            logging.error('(dydt[~sel_done] < 0).shape %s',
                          str((dydt[~sel_done] < 0).shape))
            logging.error('(-q_max - Q_ind_tot) / dydt + t).shape %s',
                          str(((-q_max - Q_ind_tot) / dydt + t).shape))
            logging.error(
                '(-q_max - Q_ind_tot) / dydt + t)[~sel_done].shape %s',
                str(((-q_max - Q_ind_tot) / dydt + t)[~sel_done].shape))

            raise

        # Calculate the step size
        step_size[~sel_done] = t_left / dt / (n_store - i_step[~sel_done] - 1)

        # Limit step size to max_step_size
        # Needed for functions with changing slope direction
        # (a chained boolean-index assignment here would write to a copy)
        not_done_idx = np.where(~sel_done)[0]
        step_size[not_done_idx[step_size[not_done_idx] > max_step_size]] = max_step_size
        # Minimum step size = 1
        step_size[np.logical_and(~sel_done, step_size < 1.)] = 1

        step_size = step_size.astype(int)

        return step_size

    def store_if_needed(step, next_step, Q_ind_tot_e, Q_ind_tot_h, dt, dQ_e,
                        dQ_h, T, I_ind_e, I_ind_h, p_e, p_h, q_max, i_step,
                        n_store, sel_e, sel_h):
        ''' Checks whether charge carrier values need to be stored and
        updates the next storage time step in place.
        '''

        t = dt * step  # Actual time step

        # Select charges that need storing for full array (1 entry per e-h)
        store = step == next_step

        # Select charges that need storing and are not fully propagated yet
        # for full array (1 entry per e-h)
        store_e = np.logical_and(store, sel_e)
        store_h = np.logical_and(store, sel_h)

        # Select charges that need storing, for the reduced array (e-h pairs
        # that still need propagating)
        s_e = store_e[sel_e]
        s_h = store_h[sel_h]

        # All carriers already stored
        if not np.any(store):
            return

        # All carriers needing storing are fully drifted
        if not np.any(s_e) and not np.any(s_h):
            return

        # Set data of actual time step
        T[i_step[store], store] = t

        # Calculate induced charge for integrated time steps T
        DT_e = T[i_step[store_e], store_e] - T[i_step[store_e] - 1, store_e]
        DT_h = T[i_step[store_h], store_h] - T[i_step[store_h] - 1, store_h]
        DT_e[np.isnan(DT_e)] = dt  # First storing
        DT_h[np.isnan(DT_h)] = dt  # First storing
        I_ind_e[i_step[store_e], store_e] = dQ_e_step[store_e] / DT_e
        I_ind_h[i_step[store_h], store_h] = dQ_h_step[store_h] / DT_h

        # Store data

        traj_e[i_step[store_e], :, store_e] = p_e[:, store_e].T
        traj_h[i_step[store_h], :, store_h] = p_h[:, store_h].T

        # Calculate step size as the minimum of the e and h step size
        d_step = np.zeros_like(next_step)
        d_step_e = np.zeros_like(d_step)
        d_step_h = np.zeros_like(d_step)
        if np.any(store_e):
            d_step_e[store_e] = cal_step_size(t,
                                              Q_ind_tot=Q_ind_tot_e[store_e],
                                              dt=dt,
                                              dydt=I_ind_e[i_step[store_e],
                                                           store_e],
                                              q_max=q_max[store_e],
                                              i_step=i_step[store_e],
                                              n_store=n_store)
            d_step[store_e] = d_step_e[store_e]

        if np.any(store_h):
            d_step_h[store_h] = cal_step_size(t,
                                              Q_ind_tot=Q_ind_tot_h[store_h],
                                              dt=dt,
                                              dydt=I_ind_h[i_step[store_h],
                                                           store_h],
                                              q_max=q_max[store_h],
                                              i_step=i_step[store_h],
                                              n_store=n_store)
            d_step[store_h] = d_step_h[store_h]
        sel = np.logical_and(store_e, store_h)
        d_step[sel] = np.minimum(d_step_e[sel], d_step_h[sel])

        next_step += d_step

        # Increase storage histogram indices
        i_step[store] += 1

        i_step[i_step >= T.shape[0]] = T.shape[0] - 1

        # Reset tmp. variable
        dQ_e_step[store_e] = 0.
        dQ_h_step[store_h] = 0.

    # Temporary variables to accumulate the total induced charge per save
    # step; otherwise the resolution of the induced current calculation
    # is reduced
    dQ_e_step = np.zeros(shape=p_e.shape[1])
    dQ_h_step = np.zeros(shape=p_h.shape[1])

    progress_bar = progressbar.ProgressBar(widgets=[
        '',
        progressbar.Percentage(), ' ',
        progressbar.Bar(marker='*', left='|', right='|'), ' ',
        progressbar.AdaptiveETA()
    ],
                                           maxval=n_steps,
                                           term_width=80)

    progress_bar.start()

    sel_e, _ = _in_boundary(geom_descr,
                            pot_descr=pot_descr,
                            x=p_e[0, :],
                            y=p_e[1, :],
                            sel=np.ones(p_e.shape[1], dtype=bool))
    sel_h, _ = _in_boundary(geom_descr,
                            pot_descr=pot_descr,
                            x=p_h[0, :],
                            y=p_h[1, :],
                            sel=np.ones(p_h.shape[1], dtype=bool))

    for step in range(n_steps):
        # Check if all particles out of boundary
        if not np.any(sel_h) and not np.any(sel_e):
            break  # Stop loop to save time

        # Electric field in V/cm
        E_e = pot_descr.get_field(p_e[0, sel_e], p_e[1, sel_e]) * 1e4
        E_h = pot_descr.get_field(p_h[0, sel_h], p_h[1, sel_h]) * 1e4

        # Mobility in cm2 / Vs
        mu_e = silicon.get_mobility(np.sqrt(E_e[0]**2 + E_e[1]**2),
                                    temperature=temp,
                                    is_electron=True)
        mu_h = silicon.get_mobility(np.sqrt(E_h[0]**2 + E_h[1]**2),
                                    temperature=temp,
                                    is_electron=False)

        # Drift velocity in cm / s
        v_e, v_h = -E_e * mu_e, E_h * mu_h

        if diffusion:
            v_e, v_h = add_diffusion(v_e, v_h)

        # Calculate induced current
        # Only if electrons are still drifting
        if np.any(sel_e):
            # Weighting field in V/um
            W_e = pot_w_descr.get_field(p_e[0, sel_e], p_e[1, sel_e])

            # Induced charge dQ = E_w . v * q * dt (Shockley-Ramo theorem)
            dQ_e = (W_e[0] * v_e[0] + W_e[1] * v_e[1]) * \
                - q0[sel_e] * dt * 1e-5

            # Reduce induced charge due to trapping
            if t_e_trapping:
                t_e = t_e_trapping + t_e_t1 * np.sqrt(E_e[0]**2 +
                                                      E_e[1]**2) * 1e-4
                dQ_e *= np.exp(-dt * step / t_e)

            if t_r:
                dQ_e *= np.exp(-dt * step / (t_r / 2.2))

            dQ_e_step[sel_e] += dQ_e

            Q_ind_tot_e[sel_e] += dQ_e

            I_ind_tot[step] += dQ_e.sum() / dt

        if np.any(sel_h):  # Only if holes are still drifting
            # Weighting field in V/um
            W_h = pot_w_descr.get_field(p_h[0, sel_h], p_h[1, sel_h])

            # Induced charge dQ = E_w . v * q * dt (Shockley-Ramo theorem)
            dQ_h = (W_h[0] * v_h[0] + W_h[1] * v_h[1]) * \
                q0[sel_h] * dt * 1e-5

            # Reduce induced charge due to trapping
            if t_h_trapping:
                t_h = t_h_trapping + t_h_t1 * np.sqrt(E_h[0]**2 +
                                                      E_h[1]**2) * 1e-4
                dQ_h *= np.exp(-dt * step / t_h)

            if t_r:
                dQ_h *= np.exp(-dt * step / (t_r / 2.2))

            dQ_h_step[sel_h] += dQ_h

            Q_ind_tot_h[sel_h] += dQ_h

            I_ind_tot[step] += dQ_h.sum() / dt

        # Store
        store_if_needed(step,
                        next_step,
                        Q_ind_tot_e=Q_ind_tot_e,
                        Q_ind_tot_h=Q_ind_tot_h,
                        dt=dt,
                        dQ_e=dQ_e,
                        dQ_h=dQ_h,
                        T=T,
                        I_ind_e=I_ind_e,
                        I_ind_h=I_ind_h,
                        p_e=p_e,
                        p_h=p_h,
                        q_max=q0,
                        i_step=i_step,
                        n_store=n_store,
                        sel_e=sel_e,
                        sel_h=sel_h)

        # Position change in um
        d_p_e, d_p_h = v_e * dt * 1e-5, v_h * dt * 1e-5

        # Update position
        p_e[:, sel_e] = p_e[:, sel_e] + d_p_e
        p_h[:, sel_h] = p_h[:, sel_h] + d_p_h

        # Correct boundaries (e.g. leaving sensor due to diffusion)
        _correct_boundary(geom_descr,
                          pot_descr,
                          x=p_e[0, :],
                          y=p_e[1, :],
                          sel=sel_e,
                          is_electron=True)
        _correct_boundary(geom_descr,
                          pot_descr,
                          x=p_h[0, :],
                          y=p_h[1, :],
                          sel=sel_h,
                          is_electron=False)

        # Check boundaries and update selection
        sel_e, new_e = _in_boundary(geom_descr,
                                    pot_descr=pot_descr,
                                    x=p_e[0, :],
                                    y=p_e[1, :],
                                    sel=sel_e)
        sel_h, new_h = _in_boundary(geom_descr,
                                    pot_descr=pot_descr,
                                    x=p_h[0, :],
                                    y=p_h[1, :],
                                    sel=sel_h)

        # Force a storing step for e-h pairs where one carrier is finished
        next_step[new_e] = step + 1
        next_step[new_h] = step + 1

        p_e[:, ~sel_e] = np.nan
        p_h[:, ~sel_h] = np.nan

        progress_bar.update(step)
    progress_bar.finish()

    return traj_e, traj_h, I_ind_e, I_ind_h, T, I_ind_tot, Q_ind_tot_e, Q_ind_tot_h
Exemplo n.º 17
0
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
    '''Generate a HitOr calibration file (_calibration.h5) from a raw data file and plot the calibration data.

    Parameters
    ----------
    output_filename : string
        Input raw data file name.
    plot_pixel_calibrations : bool, iterable
        If True, generate additional pixel calibration plots. If a list of column and row tuples (from 1 to 80 / 336), plot the selected pixels.

    Returns
    -------
    nothing
    '''
    logging.info('Analyze HitOR calibration data and plot results of %s',
                 output_filename)

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True
                        ) as analyze_raw_data:  # Interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters for in-RAM histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()
        n_injections = analyze_raw_data.n_injections  # use later

        with tb.open_file(
                analyze_raw_data._analyzed_data_file, 'r'
        ) as in_file_h5:  # Get scan parameters from interpreted file
            meta_data = in_file_h5.root.meta_data[:]
            scan_parameters_dict = get_scan_parameter(meta_data)
            inner_loop_parameter_values = scan_parameters_dict[next(
                reversed(scan_parameters_dict)
            )]  # inner loop parameter name is unknown
            scan_parameter_names = scan_parameters_dict.keys()

            meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(
                meta_data, scan_parameters=scan_parameter_names)
            scan_parameter_values = get_scan_parameters_table_from_meta_data(
                meta_data_table_at_scan_parameter, scan_parameter_names)
            event_number_ranges = get_ranges_from_array(
                meta_data_table_at_scan_parameter['event_number'])
            event_ranges_per_parameter = np.column_stack(
                (scan_parameter_values, event_number_ranges))
            hits = in_file_h5.root.Hits[:]
            event_numbers = hits['event_number'].copy()  # create contiguous array, otherwise np.searchsorted is too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

            output_filename = os.path.splitext(output_filename)[0]
            with tb.open_file(output_filename + "_calibration.h5",
                              mode="w") as calibration_data_file:
                logging.info('Create calibration')
                calibration_data = np.full(
                    shape=(80, 336, len(inner_loop_parameter_values), 4),
                    fill_value=np.nan,
                    dtype='f4'
                )  # the calibration result is a histogram with col_index, row_index, PlsrDAC index and, on the last axis: mean ToT, mean TDC, ToT RMS, TDC RMS

                progress_bar = progressbar.ProgressBar(
                    widgets=[
                        '',
                        progressbar.Percentage(), ' ',
                        progressbar.Bar(marker='*', left='|', right='|'), ' ',
                        progressbar.AdaptiveETA()
                    ],
                    maxval=len(event_ranges_per_parameter),
                    term_width=80)
                progress_bar.start()

                for index, (
                        actual_scan_parameter_values, event_start,
                        event_stop) in enumerate(event_ranges_per_parameter):
                    if event_stop is None:  # happens for the last chunk
                        event_stop = hits[-1]['event_number'] + 1
                    array_index = np.searchsorted(
                        event_numbers, np.array([event_start, event_stop]))
                    actual_hits = hits[array_index[0]:array_index[1]]
                    for item_index, item in enumerate(scan_parameter_names):
                        if item == "column":
                            actual_col = actual_scan_parameter_values[
                                item_index]
                        elif item == "row":
                            actual_row = actual_scan_parameter_values[
                                item_index]
                        elif item == "PlsrDAC":
                            plser_dac = actual_scan_parameter_values[
                                item_index]
                        else:
                            raise ValueError("Unknown scan parameter %s" %
                                             item)

                    # Only hits from the current column/row should be in the current data chunk, but since the SRAM is not cleared for each scan step (for speed) and there may be noisy pixels, this is not always the case
                    n_wrong_pixel = np.count_nonzero(
                        np.logical_or(actual_hits['column'] != actual_col,
                                      actual_hits['row'] != actual_row))
                    if n_wrong_pixel != 0:
                        logging.warning(
                            '%d hit(s) from other pixels for scan parameters %s',
                            n_wrong_pixel, ', '.join([
                                '%s=%s' % (name, value)
                                for (name, value
                                     ) in zip(scan_parameter_names,
                                              actual_scan_parameter_values)
                            ]))

                    actual_hits = actual_hits[np.logical_and(
                        actual_hits['column'] == actual_col, actual_hits['row']
                        == actual_row)]  # Only take data from selected pixel
                    actual_tdc_hits = actual_hits[
                        (actual_hits['event_status'] & 0b0000111110011100) ==
                        0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                    actual_tot_hits = actual_hits[
                        (actual_hits['event_status'] & 0b0000100010011100) ==
                        0b0000000000000000]  # only take hits from good events for tot
                    tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']

                    if tdc.shape[0] < n_injections:
                        logging.info(
                            '%d of %d expected TDC hits for scan parameters %s',
                            tdc.shape[0], n_injections, ', '.join([
                                '%s=%s' % (name, value)
                                for (name, value
                                     ) in zip(scan_parameter_names,
                                              actual_scan_parameter_values)
                            ]))
                    if tot.shape[0] < n_injections:
                        logging.info(
                            '%d of %d expected hits for scan parameters %s',
                            tot.shape[0], n_injections, ', '.join([
                                '%s=%s' % (name, value)
                                for (name, value
                                     ) in zip(scan_parameter_names,
                                              actual_scan_parameter_values)
                            ]))

                    # translate the scan parameter value to an index for the result histogram
                    inner_loop_scan_parameter_index = np.where(plsr_dac == inner_loop_parameter_values)[0][0]
                    # numpy mean and std return nan if array is empty
                    calibration_data[actual_col - 1, actual_row - 1,
                                     inner_loop_scan_parameter_index,
                                     0] = np.mean(tot)
                    calibration_data[actual_col - 1, actual_row - 1,
                                     inner_loop_scan_parameter_index,
                                     1] = np.mean(tdc)
                    calibration_data[actual_col - 1, actual_row - 1,
                                     inner_loop_scan_parameter_index,
                                     2] = np.std(tot)
                    calibration_data[actual_col - 1, actual_row - 1,
                                     inner_loop_scan_parameter_index,
                                     3] = np.std(tdc)

                    progress_bar.update(index)
                progress_bar.finish()

                calibration_data_out = calibration_data_file.create_carray(
                    calibration_data_file.root,
                    name='HitOrCalibration',
                    title='Hit OR calibration data',
                    atom=tb.Atom.from_dtype(calibration_data.dtype),
                    shape=calibration_data.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                calibration_data_out[:] = calibration_data
                calibration_data_out.attrs.dimensions = scan_parameter_names
                calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
                calibration_data_out.flush()
                #                 with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
                plot_scurves(calibration_data[:, :, :, 0],
                             inner_loop_parameter_values,
                             "ToT calibration",
                             "ToT",
                             15,
                             "Charge [PlsrDAC]",
                             filename=analyze_raw_data.output_pdf)
                plot_scurves(calibration_data[:, :, :, 1],
                             inner_loop_parameter_values,
                             "TDC calibration",
                             "TDC [ns]",
                             None,
                             "Charge [PlsrDAC]",
                             filename=analyze_raw_data.output_pdf)
                tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0],
                                              axis=(0, 1))
                tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0],
                                              axis=(0, 1))
                tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1],
                                              axis=(0, 1))
                tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1],
                                              axis=(0, 1))
                plot_tot_tdc_calibration(
                    scan_parameters=inner_loop_parameter_values,
                    tot_mean=tot_mean_all_pix,
                    tot_error=tot_error_all_pix,
                    tdc_mean=tdc_mean_all_pix,
                    tdc_error=tdc_error_all_pix,
                    filename=analyze_raw_data.output_pdf,
                    title="Mean charge calibration of %d pixel(s)" %
                    np.count_nonzero(~np.all(
                        np.isnan(calibration_data[:, :, :, 0]), axis=2)))
                # plotting individual pixels
                if plot_pixel_calibrations is True:
                    # selecting pixels with non-nan entries
                    col_row_non_nan = np.nonzero(~np.all(
                        np.isnan(calibration_data[:, :, :, 0]), axis=2))
                    plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
                elif plot_pixel_calibrations is False:
                    plot_pixel_calibrations = np.array([], dtype=int)
                else:  # assuming list of column / row tuples
                    plot_pixel_calibrations = np.array(
                        plot_pixel_calibrations) - 1
                # generate index array
                pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
                plot_n_pixels = 10  # number of pixels at the beginning, center and end of the array
                np.random.seed(0)
                # select random pixels
                if pixel_indices.size - 2 * plot_n_pixels >= 0:
                    random_pixel_indices = np.sort(
                        np.random.choice(
                            pixel_indices[plot_n_pixels:-plot_n_pixels],
                            min(plot_n_pixels,
                                pixel_indices.size - 2 * plot_n_pixels),
                            replace=False))
                else:
                    random_pixel_indices = np.array([], dtype=int)
                selected_pixel_indices = np.unique(
                    np.hstack([
                        pixel_indices[:plot_n_pixels], random_pixel_indices,
                        pixel_indices[-plot_n_pixels:]
                    ]))
                # plotting individual pixels
                for (column,
                     row) in plot_pixel_calibrations[selected_pixel_indices]:
                    logging.info(
                        "Plotting charge calibration for pixel column " +
                        str(column + 1) + " / row " + str(row + 1))
                    tot_mean_single_pix = calibration_data[column, row, :, 0]
                    tot_std_single_pix = calibration_data[column, row, :, 2]
                    tdc_mean_single_pix = calibration_data[column, row, :, 1]
                    tdc_std_single_pix = calibration_data[column, row, :, 3]
                    plot_tot_tdc_calibration(
                        scan_parameters=inner_loop_parameter_values,
                        tot_mean=tot_mean_single_pix,
                        tot_error=tot_std_single_pix,
                        tdc_mean=tdc_mean_single_pix,
                        tdc_error=tdc_std_single_pix,
                        filename=analyze_raw_data.output_pdf,
                        title="Charge calibration for pixel column " +
                        str(column + 1) + " / row " + str(row + 1))
Exemplo n.º 18
0
        def update(self, pbar):
            try:
                return str(sets[pbar.currval-1])
            except IndexError:
                return ''
    widgets = ['Benchmark: ',
               pbar.Percentage(),
               ' ',
               pbar.Counter(), '/', str(n),
               ' ',
               Counter(),
               ' ',
               pbar.Bar(marker='-'),
               ' ',
               pbar.AdaptiveETA(),
               ' ',
               ]
    pbar = pbar.ProgressBar(widgets=widgets, maxval=n).start()

    # go johnny go, go!
    for i, it in enumerate(sets):
        size, storage, entropy, codec, level = it

        if size == 'small':
            number = 10
            repeat = 10
        elif size == 'mid':
            number = 5
            repeat = 5
        elif size == 'large':
Exemplo n.º 19
0
    def analyze(self):
        logging.info('Analysing the PlsrDAC waveforms')
        with tb.open_file(self.output_filename + '.h5', 'r') as in_file_h5:
            data = in_file_h5.root.PlsrDACwaveforms[:]
            times = np.array(in_file_h5.root.PlsrDACwaveforms._v_attrs.times)
            scan_parameter_values = in_file_h5.root.PlsrDACwaveforms._v_attrs.scan_parameter_values
            trigger_levels = in_file_h5.root.PlsrDACwaveforms._v_attrs.trigger_levels
            fit_range = ast.literal_eval(
                in_file_h5.root.configuration.run_conf[:][np.where(
                    in_file_h5.root.configuration.run_conf[:]['name'] ==
                    'fit_range')]['value'][0])
            fit_range_step = ast.literal_eval(
                in_file_h5.root.configuration.run_conf[:][np.where(
                    in_file_h5.root.configuration.run_conf[:]['name'] ==
                    'fit_range_step')]['value'][0])
            progress_bar = progressbar.ProgressBar(widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
                                                   maxval=data.shape[0],
                                                   term_width=80)

            with tb.open_file(self.output_filename + '_interpreted.h5',
                              'w') as out_file_h5:
                description = [('PlsrDAC', np.uint32),
                               ('voltage_step', float)
                               ]  # output data table description
                data_array = np.zeros((data.shape[0], ), dtype=description)
                data_table = out_file_h5.create_table(
                    out_file_h5.root,
                    name='plsr_dac_data',
                    description=np.zeros((1, ), dtype=description).dtype,
                    title='Voltage steps from transient PlsrDAC calibration scan')
                with PdfPages(self.output_filename +
                              '_interpreted.pdf') as output_pdf:
                    progress_bar.start()
                    for index in range(data.shape[0]):
                        voltages = data[index]
                        trigger_level = trigger_levels[index]
                        plsr_dac = scan_parameter_values[index]
                        if abs(trigger_level) < 0.005:
                            logging.warning(
                                'The trigger threshold for PlsrDAC %d is too low (%d mV). This setting is omitted in the analysis!',
                                plsr_dac, trigger_level * 1000.)
                            data_array['voltage_step'][index] = np.nan
                            continue
                        step_index = np.where(
                            np.abs(voltages - trigger_level) == np.amin(
                                np.abs(voltages - trigger_level)))[0][0]

                        left_step_fit_range = (step_index +
                                               fit_range_step[0][0],
                                               step_index +
                                               fit_range_step[0][1])
                        right_step_fit_range = (step_index +
                                                fit_range_step[1][0],
                                                step_index +
                                                fit_range_step[1][1])

                        # Error handling if the selected fit range exceeds limits
                        if (left_step_fit_range[0] < 0 or
                                left_step_fit_range[1] < 0 or
                                right_step_fit_range[0] >= data.shape[1] or
                                right_step_fit_range[1] >= data.shape[1] or
                                left_step_fit_range[0] >= left_step_fit_range[1] or
                                right_step_fit_range[0] >= right_step_fit_range[1]):
                            logging.warning(
                                'The step fit limits for PlsrDAC %d are out of bounds. Omit this data!',
                                plsr_dac)
                            data_array['voltage_step'][index] = np.nan
                            continue

                        times_left_step = times[left_step_fit_range[0]:left_step_fit_range[1]]
                        voltage_left_step = voltages[left_step_fit_range[0]:left_step_fit_range[1]]
                        times_right_step = times[right_step_fit_range[0]:right_step_fit_range[1]]
                        voltage_right_step = voltages[right_step_fit_range[0]:right_step_fit_range[1]]

                        median_left_step = np.median(voltage_left_step)
                        median_right_step = np.median(voltage_right_step)

                        data_array['PlsrDAC'][index] = plsr_dac
                        data_array['voltage_step'][
                            index] = median_left_step - median_right_step

                        # Plot waveform + fit
                        plt.clf()
                        plt.grid()
                        plt.plot(times * 1e9, voltages * 1e3, label='Data')
                        plt.plot(times * 1e9,
                                 np.repeat([trigger_level * 1e3], len(times)),
                                 '--',
                                 label='Trigger (%d mV)' %
                                 (trigger_level * 1000))
                        plt.plot(times_left_step * 1e9,
                                 np.repeat(median_left_step * 1e3,
                                           times_left_step.shape[0]),
                                 '-',
                                 linewidth=2,
                                 label='Left of step constant fit')
                        plt.plot(times_right_step * 1e9,
                                 np.repeat(median_right_step * 1e3,
                                           times_right_step.shape[0]),
                                 '-',
                                 linewidth=2,
                                 label='Right of step constant fit')
                        plt.title('PulserDAC %d waveform' % plsr_dac)
                        plt.xlabel('Time [ns]')
                        plt.ylabel('Voltage [mV]')
                        plt.legend(loc=0)
                        output_pdf.savefig()
                        progress_bar.update(index)
                    data_table.append(data_array[np.isfinite(
                        data_array['voltage_step'])])  # store valid data

                    # Plot, fit and store linear PlsrDAC transfer function
                    x, y = data_array[np.isfinite(
                        data_array['voltage_step'])]['PlsrDAC'], data_array[
                            np.isfinite(
                                data_array['voltage_step'])]['voltage_step']
                    fit = polyfit(
                        x[np.logical_and(x >= fit_range[0],
                                         x <= fit_range[1])],
                        y[np.logical_and(x >= fit_range[0],
                                         x <= fit_range[1])], 1)
                    fit_fn = poly1d(fit)
                    plt.clf()
                    plt.plot(x, y, '.-', label='data')
                    plt.plot(x, fit_fn(x), '--k', label=str(fit_fn))
                    plt.title('PlsrDAC calibration')
                    plt.xlabel('PlsrDAC')
                    plt.ylabel('Voltage step [V]')
                    plt.grid(True)
                    plt.legend(loc=0)
                    output_pdf.savefig()
                    # Store result in file
                    self.register.calibration_parameters[
                        'Vcal_Coeff_0'] = fit[1] * 1000.  # store in mV
                    self.register.calibration_parameters[
                        'Vcal_Coeff_1'] = fit[0] * 1000.  # store in mV/DAC
            progress_bar.finish()
Exemplo n.º 20
0
def histogram_tdc_hits(input_file_hits,
                       hit_selection_conditions,
                       event_status_select_mask,
                       event_status_condition,
                       calibration_file=None,
                       correct_calibration=None,
                       max_tdc=1000,
                       ignore_disabled_regions=True,
                       n_bins=200,
                       plot_data=True):
    for condition in hit_selection_conditions:
        logging.info('Histogram TDC hits with %s', condition)

    def get_charge(
            max_tdc, tdc_calibration_values,
            tdc_pixel_calibration):  # return the charge from calibration
        charge_calibration = np.zeros(shape=(80, 336, max_tdc))
        for column in range(80):
            for row in range(336):
                actual_pixel_calibration = tdc_pixel_calibration[column,
                                                                 row, :]
                # Only take pixels with at least 3 valid calibration points
                if np.count_nonzero(
                        actual_pixel_calibration != 0
                ) > 2 and np.count_nonzero(
                        np.isfinite(actual_pixel_calibration)) > 2:
                    selected_measurements = np.isfinite(
                        actual_pixel_calibration
                    )  # Select valid calibration steps
                    selected_actual_pixel_calibration = actual_pixel_calibration[
                        selected_measurements]
                    selected_tdc_calibration_values = tdc_calibration_values[
                        selected_measurements]
                    interpolation = interp1d(
                        x=selected_actual_pixel_calibration,
                        y=selected_tdc_calibration_values,
                        kind='slinear',
                        bounds_error=False,
                        fill_value=0)
                    charge_calibration[column, row, :] = interpolation(
                        np.arange(max_tdc))
        return charge_calibration
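
    # Editor's note: the calibration stores mean TDC as a function of charge,
    # so swapping x and y in interp1d above inverts it to charge as a function
    # of TDC, evaluated on the integer TDC axis 0..max_tdc-1.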

    def plot_tdc_tot_correlation(data, condition, output_pdf):
        logging.info('Plot correlation histogram for %s', condition)
        data = np.ma.array(data, mask=(data <= 0))
        if np.ma.any(data > 0):
            fig = Figure()
            FigureCanvas(fig)
            ax = fig.add_subplot(111)
            cmap = cm.get_cmap('jet', 200)
            cmap.set_bad('w')
            ax.set_title('Correlation with %s' % condition)
            norm = colors.LogNorm()
            z_max = data.max(fill_value=0)
            ax.set_xlabel('TDC')
            ax.set_ylabel('TOT')
            im = ax.imshow(data,
                           cmap=cmap,
                           norm=norm,
                           aspect='auto',
                           interpolation='nearest')
            divider = make_axes_locatable(ax)
            ax.invert_yaxis()
            cax = divider.append_axes("right", size="5%", pad=0.1)
            fig.colorbar(im,
                         cax=cax,
                         ticks=np.linspace(start=0,
                                           stop=z_max,
                                           num=9,
                                           endpoint=True))
            output_pdf.savefig(fig)
        else:
            logging.warning('No data for correlation plotting for %s',
                            condition)

    def plot_hits_per_condition(output_pdf):
        logging.info(
            'Plot hits selection efficiency histogram for %d conditions',
            len(hit_selection_conditions) + 2)
        labels = ['All Hits', 'Hits of\ngood events']
        for condition in hit_selection_conditions:
            condition = re.sub('[&]', '\n', condition)
            condition = re.sub('[()]', '', condition)
            labels.append(condition)
        fig = Figure()
        FigureCanvas(fig)
        ax = fig.add_subplot(111)
        ax.bar(range(len(n_hits_per_condition)),
               n_hits_per_condition,
               align='center')
        ax.set_xticks(range(len(n_hits_per_condition)))
        ax.set_xticklabels(labels)
        ax.tick_params(axis='x', labelsize=8)
        ax.set_title('Number of hits for different cuts')
        ax.set_yscale('log')
        ax.set_ylabel('#')
        ax.grid(True)
        for x, y in zip(np.arange(len(n_hits_per_condition)),
                        n_hits_per_condition):
            ax.annotate('%d' %
                        (float(y) / float(n_hits_per_condition[0]) * 100.0) +
                        r'%',
                        xy=(x, y / 2.0),
                        xycoords='data',
                        color='grey',
                        size=15)
        output_pdf.savefig(fig)

    def plot_corrected_tdc_hist(x, y, title, output_pdf, point_style='-'):
        logging.info('Plot TDC hist with TDC calibration')
        fig = Figure()
        FigureCanvas(fig)
        ax = fig.add_subplot(111)
        y /= np.amax(y) if y.shape[0] > 0 else y
        ax.plot(x, y, point_style)
        ax.set_title(title, size=10)
        ax.set_xlabel('Charge [PlsrDAC]')
        ax.set_ylabel('Count [a.u.]')
        ax.grid(True)
        output_pdf.savefig(fig)

    def get_calibration_correction(
        tdc_calibration, tdc_calibration_values, filename_new_calibration
    ):  # correct the TDC calibration with the TDC calib in filename_new_calibration by shifting the means
        with tb.open_file(filename_new_calibration, 'r') as in_file_2:
            with PdfPages(
                    os.path.splitext(filename_new_calibration)[0] +
                    '.pdf') as output_pdf:
                charge_calibration_1 = tdc_calibration
                charge_calibration_2 = in_file_2.root.HitOrCalibration[:, :, :, 1]

                plsr_dacs = tdc_calibration_values
                if not np.all(plsr_dacs == in_file_2.root.HitOrCalibration.
                              _v_attrs.scan_parameter_values):
                    raise NotImplementedError(
                        'The check calibration file has to have the same PlsrDAC values'
                    )

                # Valid pixel have a calibration in the new and the old calibration
                valid_pixel = np.where(
                    ~np.all((charge_calibration_1 == 0), axis=2)
                    & ~np.all(np.isnan(charge_calibration_1), axis=2)
                    & ~np.all((charge_calibration_2 == 0), axis=2)
                    & ~np.all(np.isnan(charge_calibration_2), axis=2))
                mean_charge_calibration = np.nanmean(
                    charge_calibration_2[valid_pixel], axis=0)
                offset_mean = np.nanmean((charge_calibration_2[valid_pixel] -
                                          charge_calibration_1[valid_pixel]),
                                         axis=0)

                dPlsrDAC_dTDC = analysis_utils.smooth_differentiation(
                    plsr_dacs,
                    mean_charge_calibration,
                    order=3,
                    smoothness=0,
                    derivation=1)
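                # Editor's note: this derivative is dTDC/dPlsrDAC of the mean
                # calibration curve (despite the variable name); dividing the
                # TDC offset by it below converts the offset to PlsrDAC units.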
                fig = Figure()
                FigureCanvas(fig)
                ax = fig.add_subplot(111)
                ax.plot(plsr_dacs,
                        offset_mean / dPlsrDAC_dTDC,
                        '.-',
                        label='PlsrDAC')
                ax.plot(plsr_dacs, offset_mean, '.-', label='TDC')
                ax.grid(True)
                ax.set_xlabel('PlsrDAC')
                ax.set_ylabel('Mean calibration offset')
                ax.legend(loc=0)
                ax.set_title(
                    'Mean offset between TDC calibration data, new - old ')
                output_pdf.savefig(fig)
                return offset_mean

    def delete_disabled_regions(hits, enable_mask):
        n_hits = hits.shape[0]

        # Treat the no-hits case
        if n_hits == 0:
            return hits

        # Column, row array with True for disabled pixels
        disabled_mask = ~enable_mask.astype(bool).T.copy()
        disabled_region = disabled_mask.copy()
        n_disabled_pixels = np.count_nonzero(disabled_region)

        # Extend disabled pixel mask by the neighbouring pixels
        neighbour_pixels = [(-1, 0), (1, 0), (0, -1),
                            (0, 1)]  # Disable direct neighbouring pixels
        for neighbour_pixel in neighbour_pixels:
            disabled_region = np.logical_or(
                disabled_region,
                shift(disabled_mask, shift=neighbour_pixel, cval=0))

        logging.info(
            'Masking %d additional pixel neighbouring %d disabled pixels',
            np.count_nonzero(disabled_region) - n_disabled_pixels,
            n_disabled_pixels)

        # Make 1D selection array with disabled pixels
        disabled_pixels = np.where(disabled_region)
        disabled_pixels_1d = (
            disabled_pixels[0] + 1) * disabled_region.shape[1] + (
                disabled_pixels[1] + 1
            )  # + 1 because pixel index 0,0 has column/row = 1

        hits_1d = hits['column'].astype(
            np.uint32) * disabled_region.shape[1] + hits[
                'row']  # change dtype to fit new number
        hits = hits[np.in1d(hits_1d, disabled_pixels_1d, invert=True)]

        logging.info('Lost %d hits (%d percent) due to disabling neighbours',
                     n_hits - hits.shape[0],
                     (1. - float(hits.shape[0]) / n_hits) * 100)

        return hits
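
    # Editor's note on delete_disabled_regions: packing (column, row) into a
    # single integer key lets np.in1d perform the set-membership test against
    # all disabled pixels in one vectorized pass instead of a per-hit loop.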

    # Create data
    with tb.open_file(input_file_hits, mode="r") as in_hit_file_h5:
        cluster_hit_table = in_hit_file_h5.root.ClusterHits
        try:
            enabled_pixels = in_hit_file_h5.root.ClusterHits._v_attrs.enabled_pixels[:]
        except AttributeError:  # Old and simulated data do not have this info
            logging.warning(
                'No enabled pixel mask found in data! Assume all pixels are enabled.'
            )
            enabled_pixels = np.ones(shape=(336, 80))

        # Result hists, initialized per condition
        pixel_tdc_hists_per_condition = [
            np.zeros(shape=(80, 336, max_tdc), dtype=np.uint16)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []
        pixel_tdc_timestamp_hists_per_condition = [
            np.zeros(shape=(80, 336, 256), dtype=np.uint16)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []
        mean_pixel_tdc_hists_per_condition = [
            np.zeros(shape=(80, 336), dtype=np.uint16)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []
        mean_pixel_tdc_timestamp_hists_per_condition = [
            np.zeros(shape=(80, 336), dtype=np.uint16)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []
        tdc_hists_per_condition = [
            np.zeros(shape=(max_tdc), dtype=np.uint16)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []
        tdc_corr_hists_per_condition = [
            np.zeros(shape=(max_tdc, 16), dtype=np.uint32)
            for _ in hit_selection_conditions
        ] if hit_selection_conditions else []

        n_hits_per_condition = [
            0 for _ in range(len(hit_selection_conditions) + 2)
        ]  # indices 0 and 1 count all hits and hits of good events

        logging.info(
            'Select hits and create TDC histograms for %d cut conditions',
            len(hit_selection_conditions))
        progress_bar = progressbar.ProgressBar(
            widgets=[
                '',
                progressbar.Percentage(), ' ',
                progressbar.Bar(marker='*', left='|', right='|'), ' ',
                progressbar.AdaptiveETA()
            ],
            maxval=cluster_hit_table.shape[0],
            term_width=80)
        progress_bar.start()
        for cluster_hits, _ in analysis_utils.data_aligned_at_events(
                cluster_hit_table, chunk_size=10000000):
            n_hits_per_condition[0] += cluster_hits.shape[0]
            selected_events_cluster_hits = cluster_hits[np.logical_and(
                cluster_hits['TDC'] < max_tdc,
                (cluster_hits['event_status']
                 & event_status_select_mask) == event_status_condition)]
            n_hits_per_condition[1] += selected_events_cluster_hits.shape[0]
            for index, condition in enumerate(hit_selection_conditions):
                selected_cluster_hits = analysis_utils.select_hits(
                    selected_events_cluster_hits, condition)
                if ignore_disabled_regions:
                    selected_cluster_hits = delete_disabled_regions(
                        hits=selected_cluster_hits, enable_mask=enabled_pixels)

                n_hits_per_condition[2 + index] += selected_cluster_hits.shape[0]
                column, row, tdc = selected_cluster_hits[
                    'column'] - 1, selected_cluster_hits[
                        'row'] - 1, selected_cluster_hits['TDC']
                pixel_tdc_hists_per_condition[
                    index] += fast_analysis_utils.hist_3d_index(
                        column, row, tdc, shape=(80, 336, max_tdc))
                mean_pixel_tdc_hists_per_condition[index] = np.average(
                    pixel_tdc_hists_per_condition[index],
                    axis=2,
                    weights=range(0, max_tdc)) * np.sum(np.arange(
                        0,
                        max_tdc)) / pixel_tdc_hists_per_condition[index].sum(
                            axis=2)
                tdc_timestamp = selected_cluster_hits['TDC_time_stamp']
                pixel_tdc_timestamp_hists_per_condition[
                    index] += fast_analysis_utils.hist_3d_index(column,
                                                                row,
                                                                tdc_timestamp,
                                                                shape=(80, 336,
                                                                       256))
                mean_pixel_tdc_timestamp_hists_per_condition[
                    index] = np.average(
                        pixel_tdc_timestamp_hists_per_condition[index],
                        axis=2,
                        weights=range(0, 256)) * np.sum(np.arange(
                            0, 256)) / pixel_tdc_timestamp_hists_per_condition[
                                index].sum(axis=2)
                tdc_hists_per_condition[index] = pixel_tdc_hists_per_condition[
                    index].sum(
                        axis=(0, 1), dtype=np.uint32
                    )  # fix dtype, sum will otherwise increase precision
                tdc_corr_hists_per_condition[
                    index] += fast_analysis_utils.hist_2d_index(
                        tdc, selected_cluster_hits['tot'], shape=(max_tdc, 16))
            progress_bar.update(n_hits_per_condition[0])
        progress_bar.finish()

        # Take TDC calibration if available and calculate charge for each TDC value and pixel
        if calibration_file is not None:
            with tb.open_file(calibration_file,
                              mode="r") as in_file_calibration_h5:
                tdc_calibration = in_file_calibration_h5.root.HitOrCalibration[:, :, :,
                                                                               1]
                tdc_calibration_values = in_file_calibration_h5.root.HitOrCalibration.attrs.scan_parameter_values[:]
                if correct_calibration is not None:
                    tdc_calibration += get_calibration_correction(
                        tdc_calibration, tdc_calibration_values,
                        correct_calibration)
            charge_calibration = get_charge(max_tdc, tdc_calibration_values,
                                            tdc_calibration)
        else:
            charge_calibration = None

        # Store data of result histograms
        with tb.open_file(os.path.splitext(input_file_hits)[0] +
                          '_tdc_hists.h5',
                          mode="w") as out_file_h5:
            for index, condition in enumerate(hit_selection_conditions):
                pixel_tdc_hist_result = np.swapaxes(
                    pixel_tdc_hists_per_condition[index], 0, 1)
                pixel_tdc_timestamp_hist_result = np.swapaxes(
                    pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_hist_result = np.swapaxes(
                    mean_pixel_tdc_hists_per_condition[index], 0, 1)
                mean_pixel_tdc_timestamp_hist_result = np.swapaxes(
                    mean_pixel_tdc_timestamp_hists_per_condition[index], 0, 1)
                tdc_hists_per_condition_result = tdc_hists_per_condition[index]
                tdc_corr_hist_result = np.swapaxes(
                    tdc_corr_hists_per_condition[index], 0, 1)
                # Create result hists
                out_1 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistPixelTdcCondition_%d' % index,
                    title='Hist Pixel Tdc with %s' % condition,
                    atom=tb.Atom.from_dtype(pixel_tdc_hist_result.dtype),
                    shape=pixel_tdc_hist_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_2 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistPixelTdcTimestampCondition_%d' % index,
                    title='Hist Pixel Tdc Timestamp with %s' % condition,
                    atom=tb.Atom.from_dtype(
                        pixel_tdc_timestamp_hist_result.dtype),
                    shape=pixel_tdc_timestamp_hist_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_3 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistMeanPixelTdcCondition_%d' % index,
                    title='Hist Mean Pixel Tdc with %s' % condition,
                    atom=tb.Atom.from_dtype(mean_pixel_tdc_hist_result.dtype),
                    shape=mean_pixel_tdc_hist_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_4 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistMeanPixelTdcTimestampCondition_%d' % index,
                    title='Hist Mean Pixel Tdc Timestamp with %s' % condition,
                    atom=tb.Atom.from_dtype(
                        mean_pixel_tdc_timestamp_hist_result.dtype),
                    shape=mean_pixel_tdc_timestamp_hist_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_5 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistTdcCondition_%d' % index,
                    title='Hist Tdc with %s' % condition,
                    atom=tb.Atom.from_dtype(
                        tdc_hists_per_condition_result.dtype),
                    shape=tdc_hists_per_condition_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                out_6 = out_file_h5.create_carray(
                    out_file_h5.root,
                    name='HistTdcCorrCondition_%d' % index,
                    title='Hist Correlation Tdc/Tot with %s' % condition,
                    atom=tb.Atom.from_dtype(tdc_corr_hist_result.dtype),
                    shape=tdc_corr_hist_result.shape,
                    filters=tb.Filters(complib='blosc',
                                       complevel=5,
                                       fletcher32=False))
                # Add result hists information
                out_1.attrs.dimensions, out_1.attrs.condition, out_1.attrs.tdc_values = 'column, row, TDC value', condition, range(max_tdc)
                out_2.attrs.dimensions, out_2.attrs.condition, out_2.attrs.tdc_values = 'column, row, TDC time stamp value', condition, range(256)
                out_3.attrs.dimensions, out_3.attrs.condition = 'column, row, mean TDC value', condition
                out_4.attrs.dimensions, out_4.attrs.condition = 'column, row, mean TDC time stamp value', condition
                out_5.attrs.dimensions, out_5.attrs.condition = 'PlsrDAC', condition
                out_6.attrs.dimensions, out_6.attrs.condition = 'TDC, TOT', condition
                out_1[:] = pixel_tdc_hist_result
                out_2[:] = pixel_tdc_timestamp_hist_result
                out_3[:] = mean_pixel_tdc_hist_result
                out_4[:] = mean_pixel_tdc_timestamp_hist_result
                out_5[:] = tdc_hists_per_condition_result
                out_6[:] = tdc_corr_hist_result

                if charge_calibration is not None:
                    # Select only valid pixels for histogramming: they have data and a calibration (i.e. any charge(TDC) calibration != 0)
                    valid_pixel = np.where(
                        np.logical_and(
                            charge_calibration[:, :, :max_tdc].sum(axis=2) > 0,
                            pixel_tdc_hist_result[:, :, :max_tdc].swapaxes(
                                0, 1).sum(axis=2) > 0))
                    # Create charge histogram with mean TDC calibration
                    mean_charge_calibration = charge_calibration[
                        valid_pixel][:, :max_tdc].mean(axis=0)
                    mean_tdc_hist = pixel_tdc_hist_result.swapaxes(
                        0, 1)[valid_pixel][:, :max_tdc].mean(axis=0)
                    result_array = np.rec.array(np.column_stack(
                        (mean_charge_calibration, mean_tdc_hist)),
                                                dtype=[('charge', float),
                                                       ('count', float)])
                    out_7 = out_file_h5.create_table(
                        out_file_h5.root,
                        name='HistMeanTdcCalibratedCondition_%d' % index,
                        description=result_array.dtype,
                        title='Hist Tdc with mean charge calibration and %s' %
                        condition,
                        filters=tb.Filters(complib='blosc',
                                           complevel=5,
                                           fletcher32=False))
                    out_7.attrs.condition = condition
                    out_7.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_7.attrs.n_hits = pixel_tdc_hist_result.swapaxes(
                        0, 1)[valid_pixel][:, :max_tdc].sum()
                    out_7.append(result_array)
                    # Create charge histogram with per pixel TDC calibration
                    x = charge_calibration[valid_pixel][:, :max_tdc].ravel()
                    y = pixel_tdc_hist_result.swapaxes(
                        0, 1)[valid_pixel][:, :max_tdc].ravel()
                    y_hist, x_hist = y[x > 0], x[
                        x > 0]  # remove hit TDCs without a proper PlsrDAC(TDC) calibration
                    x, y, yerr = analysis_utils.get_profile_histogram(
                        x_hist, y_hist, n_bins=n_bins)
                    result_array = np.rec.array(np.column_stack((x, y, yerr)),
                                                dtype=[('charge', float),
                                                       ('count', float),
                                                       ('count_error', float)])
                    out_8 = out_file_h5.create_table(
                        out_file_h5.root,
                        name='HistTdcCalibratedCondition_%d' % index,
                        description=result_array.dtype,
                        title=
                        'Hist Tdc with per pixel charge calibration and %s' %
                        condition,
                        filters=tb.Filters(complib='blosc',
                                           complevel=5,
                                           fletcher32=False))
                    out_8.attrs.condition = condition
                    out_8.attrs.n_pixel = valid_pixel[0].shape[0]
                    out_8.attrs.n_hits = y_hist.sum()
                    out_8.append(result_array)

    # Plot Data
    if plot_data:
        with PdfPages(
                os.path.splitext(input_file_hits)[0] +
                '_calibrated_tdc_hists.pdf') as output_pdf:
            plot_hits_per_condition(output_pdf)
            with tb.open_file(os.path.splitext(input_file_hits)[0] +
                              '_tdc_hists.h5',
                              mode="r") as in_file_h5:
                for node in in_file_h5.root:  # go through the data and plot them
                    if 'MeanPixel' in node.name:
                        try:
                            plot_three_way(
                                np.ma.masked_invalid(node[:]) * 1.5625,
                                title='Mean TDC delay, hits with\n%s' %
                                node._v_attrs.condition[:80] if 'Timestamp'
                                in node.name else 'Mean TDC, hits with\n%s' %
                                node._v_attrs.condition[:80],
                                filename=output_pdf)
                        except ValueError:
                            logging.warning('Cannot plot TDC delay')
                    elif 'HistTdcCondition' in node.name:
                        hist_1d = node[:]
                        entry_index = np.where(hist_1d != 0)
                        if entry_index[0].shape[0] != 0:
                            max_index = np.amax(entry_index)
                        else:
                            max_index = max_tdc
                        plot_1d_hist(
                            hist_1d[:max_index + 10],
                            title='TDC histogram, hits with\n%s' %
                            node._v_attrs.condition[:80]
                            if 'Timestamp' not in node.name else
                            'TDC time stamp histogram, hits with\n%s' %
                            node._v_attrs.condition[:80],
                            x_axis_title='TDC' if 'Timestamp' not in node.name
                            else 'TDC time stamp',
                            filename=output_pdf)
                    elif 'HistPixelTdc' in node.name:
                        hist_3d = node[:]
                        entry_index = np.where(hist_3d.sum(axis=(0, 1)) != 0)
                        if entry_index[0].shape[0] != 0:
                            max_index = np.amax(entry_index)
                        else:
                            max_index = max_tdc
                        best_pixel_index = np.where(
                            hist_3d.sum(axis=2) == np.amax(node[:].sum(
                                axis=2)))
                        if best_pixel_index[0].shape[
                                0] == 1:  # there could be more than one pixel with most hits
                            try:
                                plot_1d_hist(
                                    hist_3d[best_pixel_index][0, :max_index],
                                    title='TDC histogram of pixel %d, %d\n%s' %
                                    (best_pixel_index[1] + 1,
                                     best_pixel_index[0] + 1,
                                     node._v_attrs.condition[:80])
                                    if 'Timestamp' not in node.name else
                                    'TDC time stamp histogram, hits of pixel %d, %d'
                                    % (best_pixel_index[1] + 1,
                                       best_pixel_index[0] + 1),
                                    x_axis_title='TDC'
                                    if 'Timestamp' not in node.name else
                                    'TDC time stamp',
                                    filename=output_pdf)
                            except IndexError:
                                logging.warning(
                                    'Cannot plot pixel TDC histogram')
                    elif 'HistTdcCalibratedCondition' in node.name:
                        plot_corrected_tdc_hist(
                            node[:]['charge'],
                            node[:]['count'],
                            title=
                            'TDC histogram, %d pixel, per pixel TDC calib.\n%s'
                            % (node._v_attrs.n_pixel,
                               node._v_attrs.condition[:80]),
                            output_pdf=output_pdf)
                    elif 'HistMeanTdcCalibratedCondition' in node.name:
                        plot_corrected_tdc_hist(
                            node[:]['charge'],
                            node[:]['count'],
                            title='TDC histogram, %d pixel, mean TDC calib.\n%s'
                            % (node._v_attrs.n_pixel,
                               node._v_attrs.condition[:80]),
                            output_pdf=output_pdf)
                    elif 'HistTdcCorr' in node.name:
                        plot_tdc_tot_correlation(node[:],
                                                 node._v_attrs.condition,
                                                 output_pdf)
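
A note on the weighted-mean trick used for mean_pixel_tdc_hists_per_condition above: np.average with weights=range(max_tdc) divides by the sum of the weights, so the code multiplies that sum back in and divides by the per-pixel hit count to recover the mean TDC value. A minimal, self-contained sketch with hypothetical histogram values (not data from the scan above):

import numpy as np

max_tdc = 4
hist = np.zeros((2, 2, max_tdc))
hist[0, 0, 1] = 3  # three hits with TDC = 1
hist[0, 0, 3] = 1  # one hit with TDC = 3

weights = np.arange(max_tdc)
with np.errstate(invalid='ignore', divide='ignore'):  # pixels without hits yield NaN
    mean_tdc = (np.average(hist, axis=2, weights=weights)
                * weights.sum() / hist.sum(axis=2))
print(mean_tdc[0, 0])  # (3 * 1 + 1 * 3) / 4 = 1.5
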
Exemplo n.º 21
0
    def __init__(self, trainer):
        default_params = {
            "report_times_every_epoch": None,
            "report_interval_iters": 100,
            "record_file": "train.csv",
            "use_tensorboard": False
        }
        self.trainer = trainer
        default_params = utils.assign_params_dict(default_params,
                                                  self.trainer.params)

        if default_params["report_times_every_epoch"] is not None:
            self.report_interval_iters = max(
                1, self.trainer.training_point[2] //
                default_params["report_times_every_epoch"])
        else:
            self.report_interval_iters = default_params[
                "report_interval_iters"]

        if default_params["use_tensorboard"]:
            from tensorboardX import SummaryWriter
            model_name = os.path.basename(self.trainer.params["model_dir"])
            time_string = time.strftime('%Y-%m-%d-%H-%M-%S',
                                        time.localtime(time.time()))
            self.board_writer = SummaryWriter(
                "{}/log/{}-{}-tensorboard".format(
                    self.trainer.params["model_dir"], model_name, time_string))
        else:
            self.board_writer = None

        self.epochs = self.trainer.params["epochs"]

        self.optimizer = self.trainer.elements["optimizer"]

        # For optimizer wrapper such as lookahead.
        if getattr(self.optimizer, "optimizer", None) is not None:
            self.optimizer = self.optimizer.optimizer

        self.device = "[{0}]".format(
            utils.get_device(self.trainer.elements["model"]))

        self.record_value = []

        self.start_write_log = False
        if default_params["record_file"] != "" and default_params[
                "record_file"] is not None:
            self.record_file = "{0}/log/{1}".format(
                self.trainer.params["model_dir"],
                default_params["record_file"])

            # The case to recover training
            if self.trainer.params["start_epoch"] > 0:
                self.start_write_log = True
            elif os.path.exists(self.record_file):
                # Do backup to avoid clearing the loss log when re-running a same launcher.
                bk_file = "{0}.bk.{1}".format(
                    self.record_file,
                    time.strftime('%Y_%m_%d.%H_%M_%S',
                                  time.localtime(time.time())))
                shutil.move(self.record_file, bk_file)
        else:
            self.record_file = None

        # A format to show progress
        # Do not use progressbar.Bar(marker="\x1b[32m█\x1b[39m") or progressbar.SimpleProgress(format='%(value_s)s/%(max_value_s)s'); they make the string too long.
        widgets = [
            progressbar.Percentage(format='%(percentage)3.2f%%'), " | ",
            "Epoch:",
            progressbar.Variable('current_epoch',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0), "/{0}, ".format(self.epochs),
            "Iter:",
            progressbar.Variable('current_iter',
                                 format='{formatted_value}',
                                 width=0,
                                 precision=0),
            "/{0}".format(self.trainer.training_point[2]), " (",
            progressbar.Timer(format='ELA: %(elapsed)s'), ", ",
            progressbar.AdaptiveETA(), ")"
        ]

        max_value = self.trainer.params[
            "epochs"] * self.trainer.training_point[2]

        self.bar = progressbar.ProgressBar(max_value=max_value,
                                           widgets=widgets,
                                           redirect_stdout=True)

        # Use multi-process for update.
        self.queue = Queue()
        self.process = Process(target=self._update, daemon=True)
        self.process.start()
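
The _update consumer that the Process above runs is not shown in this snippet. A minimal sketch of how such a queue-driven progress reporter can work, assuming the training loop pushes iteration counts and a None sentinel onto the queue (the names and protocol here are illustrative, not the project's actual implementation):

from multiprocessing import Process, Queue

import progressbar

def _update(queue, max_value):
    # Consumer loop: advance the bar for every progress value pushed by the trainer.
    bar = progressbar.ProgressBar(max_value=max_value, redirect_stdout=True)
    while True:
        value = queue.get()  # blocks until the training loop reports progress
        if value is None:    # sentinel: training finished
            break
        bar.update(value)
    bar.finish()

if __name__ == '__main__':
    queue = Queue()
    process = Process(target=_update, args=(queue, 100), daemon=True)
    process.start()
    for i in range(100):
        queue.put(i + 1)     # the trainer would do this once per iteration
    queue.put(None)
    process.join()
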
Exemplo n.º 22
0
    def fit(self, X, B, T, W=None):
        '''Fits the model.

        :param X: numpy matrix of shape :math:`k \\cdot n`
        :param B: numpy vector of shape :math:`n`
        :param T: numpy vector of shape :math:`n`
        :param W: (optional) numpy vector of shape :math:`n`
        '''

        if W is None:
            W = numpy.ones(len(X))
        X, B, T, W = (Z if type(Z) == numpy.ndarray else numpy.array(Z)
                      for Z in (X, B, T, W))
        keep_indexes = (T > 0) & (B >= 0) & (B <= 1) & (W >= 0)
        if sum(keep_indexes) < X.shape[0]:
            n_removed = X.shape[0] - sum(keep_indexes)
            warnings.warn('Warning! Removed %d/%d entries from inputs where '
                          'T <= 0 or B not 0/1 or W < 0' % (n_removed, len(X)))
            X, B, T, W = (Z[keep_indexes] for Z in (X, B, T, W))
        n_features = X.shape[1]

        # scipy.optimize and emcee force the parameters to be a vector:
        # (log k, log p, log sigma_alpha, log sigma_beta,
        #  a, b, alpha_1...alpha_k, beta_1...beta_k)
        # Generalized Gamma is a bit sensitive to the starting point!
        x0 = numpy.zeros(6 + 2 * n_features)
        x0[0] = +1 if self._fix_k is None else log(self._fix_k)
        x0[1] = -1 if self._fix_p is None else log(self._fix_p)
        args = (X, B, T, W, self._fix_k, self._fix_p, self._hierarchical,
                self._flavor)

        # Set up progressbar and callback
        bar = progressbar.ProgressBar(widgets=[
            progressbar.Variable('loss', width=15, precision=9), ' ',
            progressbar.BouncingBar(), ' ',
            progressbar.Counter(width=6), ' [',
            progressbar.Timer(), ']'
        ])

        def callback(LL, value_history=[]):
            value_history.append(LL)
            bar.update(len(value_history), loss=LL)

        # Define objective and use automatic differentiation
        f = lambda x: -generalized_gamma_loss(x, *args, callback=callback)
        jac = autograd.grad(lambda x: -generalized_gamma_loss(x, *args))

        # Find the maximum a posteriori of the distribution
        res = scipy.optimize.minimize(f,
                                      x0,
                                      jac=jac,
                                      method='SLSQP',
                                      options={'maxiter': 9999})
        if not res.success:
            raise Exception('Optimization failed with message: %s' %
                            res.message)
        result = {'map': res.x}

        # TODO: should not use fixed k/p as search parameters
        if self._fix_k:
            result['map'][0] = log(self._fix_k)
        if self._fix_p:
            result['map'][1] = log(self._fix_p)

        # Make sure we're in a local minimum
        gradient = jac(result['map'])
        gradient_norm = numpy.dot(gradient, gradient)
        if gradient_norm >= 1e-2 * len(X):
            warnings.warn('Might not have found a local minimum! '
                          'Norm of gradient is %f' % gradient_norm)

        # Let's sample from the posterior to compute uncertainties
        if self._ci:
            dim, = res.x.shape
            n_walkers = 5 * dim
            sampler = emcee.EnsembleSampler(
                nwalkers=n_walkers,
                ndim=dim,
                log_prob_fn=generalized_gamma_loss,
                args=args,
            )
            mcmc_initial_noise = 1e-3
            p0 = [
                result['map'] + mcmc_initial_noise * numpy.random.randn(dim)
                for i in range(n_walkers)
            ]
            n_burnin = 100
            n_steps = int(numpy.ceil(2000. / n_walkers))
            n_iterations = n_burnin + n_steps

            bar = progressbar.ProgressBar(max_value=n_iterations,
                                          widgets=[
                                              progressbar.Percentage(), ' ',
                                              progressbar.Bar(),
                                              ' %d walkers [' % n_walkers,
                                              progressbar.AdaptiveETA(), ']'
                                          ])
            for i, _ in enumerate(sampler.sample(p0, iterations=n_iterations)):
                bar.update(i + 1)
            result['samples'] = sampler.chain[:, n_burnin:, :] \
                                       .reshape((-1, dim)).T
            if self._fix_k:
                result['samples'][0, :] = log(self._fix_k)
            if self._fix_p:
                result['samples'][1, :] = log(self._fix_p)

        self.params = {
            k: {
                'k': exp(data[0]),
                'p': exp(data[1]),
                'a': data[4],
                'b': data[5],
                'alpha': data[6:6 + n_features].T,
                'beta': data[6 + n_features:6 + 2 * n_features].T,
            }
            for k, data in result.items()
        }
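
As a reading aid for the parameter vector documented in the comment above, here is a small sketch that mirrors the slicing used to build self.params at the end of fit (the unpack helper is hypothetical, not part of the model class):

from numpy import exp

def unpack(x, n_features):
    # x = (log k, log p, log sigma_alpha, log sigma_beta,
    #      a, b, alpha_1...alpha_k, beta_1...beta_k)
    return {
        'k': exp(x[0]),
        'p': exp(x[1]),
        'sigma_alpha': exp(x[2]),
        'sigma_beta': exp(x[3]),
        'a': x[4],
        'b': x[5],
        'alpha': x[6:6 + n_features],
        'beta': x[6 + n_features:6 + 2 * n_features],
    }
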
Exemplo n.º 23
0
Arquivo: loom.py Projeto: GiovanH/ng18
    def finish(self, resume=False, verbose=False, use_pbar=True):
        """Block and complete all threads in queue.
        
        Args:
            resume (bool, optional): Resume spooling after finished
            verbose (bool, optional): Report progress towards queue completion.
            use_pbar (bool, optional): Graphically display progress towards queue completion
        """
        # Stop existing spools
        self.background_spool = False
        self.dirty.set()

        # By default, we don't use a callback.
        cb = None

        if verbose:
            print(self)

        # Progress bar management, optional.
        self._pbar_max = self.numRunningThreads + len(self.queue)
        if use_pbar and self._pbar_max > 0:
            widgets = [
                ("[{n}] ".format(n=self.name) if self.name else ''), progressbar.Percentage(),
                ' ', progressbar.SimpleProgress(format='%(value_s)s of %(max_value_s)s'),
                ' ', progressbar.Bar(),
                ' ', DynamicProgressString(name="state"),
                ' ', progressbar.Timer(),
                ' ', progressbar.AdaptiveETA(),
            ]
            self.progbar = progbar = progressbar.ProgressBar(max_value=self._pbar_max, widgets=widgets, redirect_stdout=True)

            def updateProgressBar():
                # Update progress bar.
                q = (len(self.queue) if self.queue else 0)
                nrt = self.numRunningThreads
                progress = (self._pbar_max - (nrt + q))
                state = "[Spool: Q: {q:2}, R: {nrt:2}/{quota}]".format(quota=self.quota, **locals())
                progbar.max_value = self._pbar_max
                progbar.update(progress, state=state)
            cb = updateProgressBar

        # assert not self.spoolThread.isAlive, "Background loop did not terminate"
        # Create a spoolloop, but block until it deploys all threads.
        execif(cb)
        while (self.queue and len(self.queue) > 0) or (self.numRunningThreads > 0):
            self.dirty.wait()
            self.doSpool(verbose=verbose)
            self.dirty.clear()
            execif(cb)

        assert len(self.queue) == 0, "Finished without deploying all threads"
        assert self.numRunningThreads == 0, "Finished without finishing all threads"
        
        if cb:
            progbar.finish()

        if resume:
            self.queue.clear()  # Empty the queue before resuming
            self.start()

        if verbose:
            print(self)
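
execif is not defined in this snippet; judging from the call sites above, it runs the callback only when one was set. A plausible one-line helper under that assumption:

def execif(fn, *args, **kwargs):
    """Call fn(*args, **kwargs) if fn is not None; otherwise do nothing."""
    if fn is not None:
        return fn(*args, **kwargs)
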
Exemplo n.º 24
0
    [
        "Maven build: ",
        get_colour(Fore.YELLOW),
        progressbar.Percentage(),
        get_colour(Fore.RESET),
        " ",
        progressbar.Counter(format='(%(value)d of %(max_value)d)'),
        get_colour(Fore.LIGHTGREEN_EX),
        progressbar.Bar(marker="\u2588"),
        get_colour(Fore.RESET),
        " ",
        progressbar.Timer(),
        " ",
        get_colour(Fore.MAGENTA),
        progressbar.AbsoluteETA(format='Finishes: %(eta)s', format_finished='Finished at %(eta)s')
        if absolute_time else progressbar.AdaptiveETA(),
        get_colour(Fore.RESET)
    ]


def ansi_length(o):
    ansi_occ = re.findall(r'\x1B\[[0-?]*[ -/]*[@-~]', o)
    ansi_len = 0
    for occ in ansi_occ:
        ansi_len += len(occ)
    return len(o) - ansi_len


def match():
    count = 0
    bar = None
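
A short usage sketch for the ansi_length helper above: colour escape codes inflate len() but occupy no terminal columns, which matters when sizing progress-bar widgets. This assumes colorama and the ansi_length definition above:

from colorama import Fore

s = Fore.YELLOW + '42%' + Fore.RESET
print(len(s))          # 13: both escape sequences are counted
print(ansi_length(s))  # 3: visible characters only
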
Exemplo n.º 25
0
    def processingLayer3(self):
        """
        Process the RGB image, using the Fourier transform to approach the segmentation.
        """
        print(" ")
        print("Preprocessing - Layer 3 processing")
        f = np.fft.fft2(self.image)
        fshift = np.fft.fftshift(f)
        magnitude_spectrum = (20 * np.log(np.abs(fshift))).astype('uint8')

        ## Mean procedure
        mean_fourier = np.array([
            magnitude_spectrum[:, :, 0].mean(),
            magnitude_spectrum[:, :, 1].mean(),
            magnitude_spectrum[:, :, 2].mean()
        ])

        ## Range procedure
        hist_01 = cv2.calcHist([magnitude_spectrum], [0, 1], None, [256, 256],
                               [0, 256, 0, 256])
        hist_02 = cv2.calcHist([magnitude_spectrum], [0, 2], None, [256, 256],
                               [0, 256, 0, 256])
        val_01 = np.percentile(hist_01, self.percentFou)
        val_02 = np.percentile(hist_02, self.percentFou)
        mask_01 = hist_01 > val_01
        mask_02 = hist_02 > val_02
        cnts_01, _ = cv2.findContours((mask_01.copy()).astype('uint8'),
                                      cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts_02, _ = cv2.findContours((mask_02.copy()).astype('uint8'),
                                      cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        for c in cnts_01:
            (x1, y1, w1, h1) = cv2.boundingRect(np.array(c))  # keeps only the last contour's bounding box
        for h in cnts_02:
            (x2, y2, w2, h2) = cv2.boundingRect(np.array(h))  # keeps only the last contour's bounding box

        ### Range fourier
        rangeFourier = np.array([[x2, x2 + w2], [x1, x1 + w1], [y1, y1 + h1]])

        ##### Processing of the image using the mean approach

        fourier = np.copy(magnitude_spectrum)
        image_transform = np.copy(self.image)
        size_x, size_y, size_c = fourier.shape
        binary_fourier = np.zeros([size_x, size_y], dtype='uint8')
        val = np.zeros(size_c)
        ### Using the colour mean
        percent = 0.83
        inicio = time.time()
        area = size_x * size_y
        widgets = [
            progressbar.Percentage(), ' ',
            progressbar.Bar(), ' ',
            progressbar.ETA(), ' ',
            progressbar.AdaptiveETA()
        ]
        bar = progressbar.ProgressBar(widgets=widgets, maxval=area)
        bar.start()
        for x in range(0, size_x):
            for y in range(0, size_y):
                for c in range(0, size_c):
                    val[c] = np.abs(1 - (
                        (image_transform[x, y, c]) - mean_fourier[c]) / 255)
                    if (val[c] < percent):
                        val[0] = 0
                        break
                    elif (val[c] >= percent):
                        val[c] = 1  # range condition met, keep this pixel
                if (val[0] == 0):
                    binary_fourier[x, y] = 0
                elif (val.all() == 1):
                    binary_fourier[x, y] = 1
                val = np.zeros(size_c)
            bar.update(x * size_y + y)  # progress measured in processed pixels
        final = time.time() - inicio
        bar.update(area)
        print("Tiempo de procesamiento : ", round(final, 2), "Segundos")

        ### Morphological Process
        # First apply a dilation
        radio = 2
        sel = disk(radio)
        binary_dilat1 = dilation(binary_fourier, sel)
        for i in range(0, 2):
            binary_dilat1 = dilation(binary_dilat1, sel)
        # Then erode to remove small objects
        radio = 5
        sel = disk(radio)
        binary_erosion1 = erosion(binary_dilat1.copy(), sel)
        for i in range(0, 2):
            binary_erosion1 = erosion(binary_erosion1, sel)
        # Then dilate again
        radio = 3
        sel = disk(radio)
        binary = dilation(binary_erosion1.copy(), sel)

        return binary, binary_fourier, rangeFourier, mean_fourier
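
The per-pixel triple loop above is O(rows x cols x channels) in interpreted Python; the same threshold test can be computed in one shot with numpy broadcasting. A minimal, equivalent sketch (a reformulation, not taken from the project):

import numpy as np

def mean_threshold_mask(image, mean_fourier, percent=0.83):
    # Same per-channel score as the loop: |1 - (pixel - channel_mean) / 255|
    val = np.abs(1.0 - (image.astype(float) - mean_fourier) / 255.0)
    # Keep a pixel only when every channel passes the threshold
    return np.all(val >= percent, axis=2).astype('uint8')
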
Exemplo n.º 26
0
def analyze_data(scan_data_filenames, ignore_columns, fei4b=False):
    logging.info("Analyzing and plotting results...")
    output_h5_filename = local_configuration['output_data_filename'] + '.h5'
    logging.info('Saving calibration in: %s' % output_h5_filename)

    if local_configuration['create_plots'] or local_configuration['create_result_plots']:
        output_pdf_filename = local_configuration['output_data_filename'] + '.pdf'
        logging.info('Saving plots in: %s' % output_pdf_filename)
        output_pdf = PdfPages(output_pdf_filename)

    # define output data structures
    mean_threshold_calibration = np.zeros(shape=(len(local_configuration['delays']),), dtype='<f8')  # array to hold the analyzed data in ram
    mean_threshold_rms_calibration = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    mean_noise_calibration = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    mean_noise_rms_calibration = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    threshold_calibration = np.zeros(shape=(80, 336, len(local_configuration['delays'])), dtype='<f8')  # array to hold the analyzed data in ram
    noise_calibration = np.zeros_like(threshold_calibration)  # array to hold the analyzed data in ram
    mean_threshold_calibration_1 = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    mean_threshold_rms_calibration_1 = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    threshold_calibration_1 = np.zeros_like(threshold_calibration)  # array to hold the analyzed data in ram
    mean_threshold_calibration_2 = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    mean_threshold_rms_calibration_2 = np.zeros_like(mean_threshold_calibration)  # array to hold the analyzed data in ram
    threshold_calibration_2 = np.zeros_like(threshold_calibration)  # array to hold the analyzed data in ram
    # initialize progress bar
    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=len(local_configuration['delays']), term_width=80)
    progress_bar.start()
    # loop over all delay values and analyze the corresponding data
    for delay_index, delay_value in enumerate(local_configuration['delays']):
        # interpret the raw data from the actual delay value
        raw_data_file = scan_data_filenames[delay_value]
        analyzed_data_file = os.path.splitext(raw_data_file)[0] + '_interpreted.h5'
        analyze(raw_data_file=raw_data_file, analyzed_data_file=analyzed_data_file, fei4b=fei4b)

        scan_parameters = None
        with tb.open_file(analyzed_data_file, mode="r") as in_file_h5:
            # mask the not scanned columns for analysis and plotting
            mask = np.logical_or(mask_columns(pixel_array=in_file_h5.root.HistThresholdFitted[:], ignore_columns=ignore_columns), mask_pixel(steps=3, shift=0).T) == 0
            occupancy_masked = mask_columns(pixel_array=in_file_h5.root.HistOcc[:], ignore_columns=ignore_columns)
            thresholds_masked = np.ma.masked_array(in_file_h5.root.HistThresholdFitted[:], mask)
            noise_masked = np.ma.masked_array(in_file_h5.root.HistNoiseFitted[:], mask)
            # plot the threshold distribution and the s curves
            if local_configuration['create_plots']:
                plotting.plot_three_way(hist=thresholds_masked * 55.0, title='Threshold Fitted for delay = ' + str(delay_value), x_axis_title='threshold [e]', filename=output_pdf)
                plotting.plot_relative_bcid(hist=in_file_h5.root.HistRelBcid[0:16], title='Relative BCID (former LVL1ID) for delay = ' + str(delay_value), filename=output_pdf)
                plotting.plot_event_errors(hist=in_file_h5.root.HistErrorCounter[:], title='Event status for delay = ' + str(delay_value), filename=output_pdf)
            meta_data_array = in_file_h5.root.meta_data[:]
            parameter_settings = analysis_utils.get_scan_parameter(meta_data_array=meta_data_array)
            scan_parameters = parameter_settings['PlsrDAC']
            if local_configuration['create_plots']:
                plotting.plot_scurves(occupancy_hist=occupancy_masked, title='S-Curves, delay ' + str(delay_value), scan_parameters=scan_parameters, scan_parameter_name='PlsrDAC', filename=output_pdf)
            # fill the calibration data arrays
            mean_threshold_calibration[delay_index] = np.ma.mean(thresholds_masked)
            mean_threshold_rms_calibration[delay_index] = np.ma.std(thresholds_masked)
            threshold_calibration[:, :, delay_index] = thresholds_masked.T
            mean_noise_calibration[delay_index] = np.ma.mean(noise_masked)
            mean_noise_rms_calibration[delay_index] = np.ma.std(noise_masked)
            noise_calibration[:, :, delay_index] = noise_masked.T

        # if activated, also analyze the two triggers separately
        if local_configuration['analysis_two_trigger']:
            with tb.open_file(os.path.splitext(analyzed_data_file)[0] + '_analyzed_1.h5', mode="r") as in_file_1_h5:
                with tb.open_file(os.path.splitext(analyzed_data_file)[0] + '_analyzed_2.h5', mode="r") as in_file_2_h5:
                    # mask the not scanned columns for analysis and plotting
                    try:
                        occupancy_masked_1 = mask_columns(pixel_array=in_file_1_h5.root.HistOcc[:], ignore_columns=ignore_columns)
                        thresholds_masked_1 = np.ma.masked_array(in_file_1_h5.root.HistThresholdFitted[:], mask)
                        rel_bcid_1 = in_file_1_h5.root.HistRelBcid[0:16]
                    except tb.exceptions.NoSuchNodeError:
                        occupancy_masked_1 = np.zeros(shape=(336, 80, 2))
                        thresholds_masked_1 = np.zeros(shape=(336, 80))
                        rel_bcid_1 = np.zeros(shape=(16, ))
                    try:
                        occupancy_masked_2 = mask_columns(pixel_array=in_file_2_h5.root.HistOcc[:], ignore_columns=ignore_columns)
                        thresholds_masked_2 = np.ma.masked_array(in_file_2_h5.root.HistThresholdFitted[:], mask)
                        rel_bcid_2 = in_file_2_h5.root.HistRelBcid[0:16]
                    except tb.exceptions.NoSuchNodeError:
                        occupancy_masked_2 = np.zeros(shape=(336, 80, 2))
                        thresholds_masked_2 = np.zeros(shape=(336, 80))
                        rel_bcid_2 = np.zeros(shape=(16, ))
                    # plot the threshold distribution and the s curves
                    if local_configuration['create_plots']:
                        plotting.plot_three_way(hist=thresholds_masked_1 * 55.0, title='Threshold Fitted for 1. trigger, delay ' + str(delay_value), x_axis_title='threshold [e]', filename=output_pdf)
                        plotting.plot_relative_bcid(hist=rel_bcid_1, title='Relative BCID (former LVL1ID) for 1. trigger, delay = ' + str(delay_value), filename=output_pdf)
                        plotting.plot_three_way(hist=thresholds_masked_2 * 55.0, title='Threshold Fitted for 2. trigger, delay ' + str(delay_value), x_axis_title='threshold [e]', filename=output_pdf)
                        plotting.plot_relative_bcid(hist=rel_bcid_2, title='Relative BCID (former LVL1ID) for 2. trigger, delay = ' + str(delay_value), filename=output_pdf)
                    if local_configuration['create_plots']:
                        plotting.plot_scurves(occupancy_hist=occupancy_masked_1, title='S-Curves 1. trigger, delay ' + str(delay_value), scan_parameters=scan_parameters, scan_parameter_name='PlsrDAC', filename=output_pdf)
                        plotting.plot_scurves(occupancy_hist=occupancy_masked_2, title='S-Curves 2. trigger, delay ' + str(delay_value), scan_parameters=scan_parameters, scan_parameter_name='PlsrDAC', filename=output_pdf)
                    # fill the calibration data arrays
                    mean_threshold_calibration_1[delay_index] = np.ma.mean(thresholds_masked_1)
                    mean_threshold_rms_calibration_1[delay_index] = np.ma.std(thresholds_masked_1)
                    threshold_calibration_1[:, :, delay_index] = thresholds_masked_1.T
                    mean_threshold_calibration_2[delay_index] = np.ma.mean(thresholds_masked_2)
                    mean_threshold_rms_calibration_2[delay_index] = np.ma.std(thresholds_masked_2)
                    threshold_calibration_2[:, :, delay_index] = thresholds_masked_2.T
        progress_bar.update(delay_index)
    progress_bar.finish()

    # plot the parameter against delay plots
    if local_configuration['create_result_plots']:
        plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration * 55.0, title='Threshold as a function of the delay', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=False, filename=output_pdf)
        plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration * 55.0, title='Threshold as a function of the delay', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=True, filename=output_pdf)
        plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration * 55.0, title='Threshold as a function of the delay', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=False, filename=output_pdf)
        plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration * 55.0, title='Threshold as a function of the delay', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=True, filename=output_pdf)
        if local_configuration['analysis_two_trigger']:
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration_1 * 55.0, title='Threshold as a function of the delay, 1. trigger', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=False, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration_1 * 55.0, title='Threshold as a function of the delay, 1. trigger', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=True, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration_1 * 55.0, title='Threshold as a function of the delay, 1. trigger', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=False, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration_1 * 55.0, title='Threshold as a function of the delay, 1. trigger', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=True, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration_2 * 55.0, title='Threshold as a function of the delay, 2. trigger', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=False, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_calibration_2 * 55.0, title='Threshold as a function of the delay, 2. trigger', x_label='delay [BCID]', y_label='Mean threshold [e]', log_x=True, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration_2 * 55.0, title='Threshold as a function of the delay, 2. trigger', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=False, filename=output_pdf)
            plotting.plot_scatter(x=local_configuration['delays'], y=mean_threshold_rms_calibration_2 * 55.0, title='Threshold as a function of the delay, 2. trigger', x_label='delay [BCID]', y_label='Threshold RMS [e]', log_x=True, filename=output_pdf)

        plotting.plot_scatter(x=local_configuration['delays'], y=mean_noise_calibration * 55.0, title='Noise as a function of the delay', x_label='delay [BCID]', y_label='Mean noise [e]', log_x=False, filename=output_pdf)
        plotting.plot_scatter(x=local_configuration['delays'], y=mean_noise_rms_calibration * 55.0, title='Noise as a function of the delay', x_label='delay [BCID]', y_label='Noise RMS [e]', log_x=False, filename=output_pdf)

    if local_configuration['create_plots'] or local_configuration['create_result_plots']:
        output_pdf.close()

    # store the calibration data into a hdf5 file as an easy to read table and as an array for quick data access
    with tb.open_file(output_h5_filename, mode="w") as out_file_h5:
        store_calibration_data_as_array(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, mean_noise_calibration, mean_noise_rms_calibration, noise_calibration)
        store_calibration_data_as_table(out_file_h5, mean_threshold_calibration, mean_threshold_rms_calibration, threshold_calibration, mean_noise_calibration, mean_noise_rms_calibration, noise_calibration)
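
A minimal sketch of the masked-array pattern used throughout analyze_data: np.ma lets mean and std ignore unscanned pixels without changing array shapes (toy values, not detector data):

import numpy as np

thresholds = np.array([[10., 12.], [0., 11.]])
mask = thresholds == 0                       # True marks pixels to ignore
thresholds_masked = np.ma.masked_array(thresholds, mask)
print(np.ma.mean(thresholds_masked))         # 11.0; the masked pixel is excluded
print(np.ma.std(thresholds_masked))          # spread over unmasked pixels only
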
Exemplo n.º 27
0
import logging
import datetime
import warnings
import traceback
import io
import pprint
import inspect
import numpy
import xarray
import progressbar

#: progressbar widget used to display progress for various reading tasks
my_pb_widget = [
    progressbar.Bar("=", "[", "]"), " ",
    progressbar.Percentage(), " (",
    progressbar.AdaptiveETA(), " -> ",
    progressbar.AbsoluteETA(), ') '
]


def add_to_argparse(parser,
                    include_period=True,
                    include_sat=0,
                    include_channels=True,
                    include_temperatures=False,
                    include_debug=False,
                    include_version=False):
    """Add commoners to argparse object.

    Helper function to add flags to :class:`argparse.ArgumentParser`
    objects, where the exact same flags are occurring in multiple scripts.
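
A short usage sketch for my_pb_widget above (the loop body is hypothetical and stands in for one reading task; it relies on the progressbar import and widget list from this module):

import time

bar = progressbar.ProgressBar(max_value=100, widgets=my_pb_widget)
for i in range(100):
    time.sleep(0.01)  # stand-in for reading one granule
    bar.update(i + 1)
bar.finish()
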
Exemplo n.º 28
0
def select_trigger_hits(input_file_hits, output_file_hits_1, output_file_hits_2):
    if (not os.path.isfile(output_file_hits_1) and not os.path.isfile(output_file_hits_2)) or local_configuration['overwrite_output_files']:
        with tb.open_file(input_file_hits, mode="r") as in_hit_file_h5:
            hit_table_in = in_hit_file_h5.root.Hits
            with tb.open_file(output_file_hits_1, mode="w") as out_hit_file_1_h5:
                with tb.open_file(output_file_hits_2, mode="w") as out_hit_file_2_h5:
                    hit_table_out_1 = out_hit_file_1_h5.create_table(out_hit_file_1_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    hit_table_out_2 = out_hit_file_2_h5.create_table(out_hit_file_2_h5.root, name='Hits', description=data_struct.HitInfoTable, title='hit_data', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
                    progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table_in.shape[0], term_width=80)
                    progress_bar.start()
                    for data, index in analysis_utils.data_aligned_at_events(hit_table_in, chunk_size=5000000):
                        hit_table_out_1.append(data[data['LVL1ID'] % 2 == 1])  # first trigger hits
                        hit_table_out_2.append(data[data['LVL1ID'] % 2 == 0])  # second trigger hits
                        progress_bar.update(index)
                    progress_bar.finish()
                    in_hit_file_h5.root.meta_data.copy(out_hit_file_1_h5.root)  # copy the meta_data node to the new file
                    in_hit_file_h5.root.meta_data.copy(out_hit_file_2_h5.root)  # copy the meta_data node to the new file
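
A minimal sketch of the LVL1ID parity split above: with two triggers per event, odd LVL1ID values belong to the first trigger and even ones to the second (toy structured array, simplified relative to data_struct.HitInfoTable):

import numpy as np

hits = np.array([(1, 7), (2, 8), (3, 9)],
                dtype=[('LVL1ID', np.uint16), ('TDC', np.uint16)])
first_trigger = hits[hits['LVL1ID'] % 2 == 1]   # LVL1ID 1 and 3
second_trigger = hits[hits['LVL1ID'] % 2 == 0]  # LVL1ID 2
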
Exemplo n.º 29
0
    def scan(self):
        # Stop mode related hacks to read all hits stored with stop mode
        self.register.set_global_register_value("StopModeCnfg", 1)
        stop_mode_cmd = self.register.get_commands("WrRegister",
                                                   name=["StopModeCnfg"])[0]
        self.register.set_global_register_value("StopModeCnfg", 0)
        stop_mode_off_cmd = self.register.get_commands(
            "WrRegister", name=["StopModeCnfg"])[0]

        self.register.set_global_register_value("StopClkPulse", 1)
        stop_clock_pulse_cmd_high = self.register.get_commands(
            "WrRegister", name=["StopClkPulse"])[0]
        self.register.set_global_register_value("StopClkPulse", 0)
        stop_clock_pulse_cmd_low = self.register.get_commands(
            "WrRegister", name=["StopClkPulse"])[0]

        start_sequence = self.register_utils.concatenate_commands((
            self.register.get_commands("zeros", length=self.trigger_delay)[0],
            stop_mode_cmd,
            self.register.get_commands("zeros", length=20)[0],
            stop_clock_pulse_cmd_high,  # FIXME: before ConfMode?
            self.register.get_commands("zeros", length=50)[0],
            self.register.get_commands("ConfMode")[0]))

        stop_sequence = self.register_utils.concatenate_commands(
            (self.register.get_commands("zeros", length=50)[0],
             stop_clock_pulse_cmd_low,
             self.register.get_commands("zeros",
                                        length=10)[0], stop_mode_off_cmd,
             self.register.get_commands("zeros", length=400)[0]))

        # define the command sequence to read the hits of one latency count
        one_latency_read = self.register_utils.concatenate_commands(
            (self.register.get_commands("zeros", length=50)[0],
             self.register.get_commands("RunMode")[0],
             self.register.get_commands("zeros", length=50)[0],
             self.register.get_commands("LV1")[0],
             self.register.get_commands("zeros", length=self.readout_delay)[0],
             self.register.get_commands("ConfMode")[0],
             self.register.get_commands("zeros", length=1000)[0],
             self.register.get_commands("GlobalPulse", Width=0)[0],
             self.register.get_commands("zeros", length=100)[0]))

        self.dut['TX']['CMD_REPEAT'] = self.trig_count
        self.dut['TX']['START_SEQUENCE_LENGTH'] = len(start_sequence)
        self.dut['TX']['STOP_SEQUENCE_LENGTH'] = len(stop_sequence) + 1

        # preload the command to be send for each trigger
        command = self.register_utils.concatenate_commands(
            (start_sequence, one_latency_read, stop_sequence))

        self.register_utils.set_command(command)

        with self.readout(no_data_timeout=self.no_data_timeout,
                          **self.scan_parameters._asdict()):
            with self.trigger():
                got_data = False
                start = time()
                while not self.stop_run.wait(1.0):
                    if not got_data:
                        if self.data_words_per_second() > 0:
                            got_data = True
                            logging.info('Taking data...')
                            if self.max_triggers:
                                self.progressbar = progressbar.ProgressBar(
                                    widgets=[
                                        '',
                                        progressbar.Percentage(), ' ',
                                        progressbar.Bar(marker='*',
                                                        left='|',
                                                        right='|'), ' ',
                                        progressbar.AdaptiveETA()
                                    ],
                                    maxval=self.max_triggers,
                                    poll=10,
                                    term_width=80).start()
                            else:
                                self.progressbar = progressbar.ProgressBar(
                                    widgets=[
                                        '',
                                        progressbar.Percentage(), ' ',
                                        progressbar.Bar(marker='*',
                                                        left='|',
                                                        right='|'), ' ',
                                        progressbar.Timer()
                                    ],
                                    maxval=self.scan_timeout,
                                    poll=10,
                                    term_width=80).start()
                    else:
                        triggers = self.dut['TLU']['TRIGGER_COUNTER']
                        try:
                            if self.max_triggers:
                                self.progressbar.update(triggers)
                            else:
                                self.progressbar.update(time() - start)
                        except ValueError:
                            pass
                        if self.max_triggers and triggers >= self.max_triggers:
                            self.progressbar.finish()
                            self.stop(msg='Trigger limit was reached: %i' %
                                      self.max_triggers)
        logging.info('Total number of triggers collected: %d',
                     self.dut['TLU']['TRIGGER_COUNTER'])
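
A minimal sketch of the two progress modes chosen above: with a trigger limit the bar tracks the trigger counter, otherwise it tracks elapsed seconds against scan_timeout (the values and loop body here are hypothetical):

import time

import progressbar

max_triggers = None  # None: run against a timeout, as in the scan above
scan_timeout = 5     # hypothetical timeout in seconds

bar = progressbar.ProgressBar(max_value=max_triggers or scan_timeout).start()
start = time.time()
while time.time() - start < scan_timeout:
    time.sleep(0.5)  # stand-in for polling the trigger counter
    bar.update(min(time.time() - start, scan_timeout))
bar.finish()
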
Exemplo n.º 30
0
def transfer_file(file_name, socket_addr):
    # Open the raw data file and send its readouts periodically

    context = zmq.Context()
    socket = context.socket(zmq.PUSH)
    socket.connect(socket_addr)  # change to socket.bind in case of PUB/SUB
    #     recv_socket = context.socket(zmq.PULL)
    #     recv_socket.connect('tcp://127.0.0.1:5011')
    logging.info("data sent to %s" % socket_addr)
    with tb.open_file(file_name, mode="r") as in_file_h5:
        start = time.time()
        meta_data = in_file_h5.root.meta_data[:]
        raw_data = in_file_h5.root.raw_data[:]
        n_readouts = meta_data.shape[0]

        last_readout_time = time.time()
        try:
            scan_parameter_names = in_file_h5.root.configuration.conf[:][
                'name']  #in_file_h5.root.scan_parameters.dtype.names
        except tb.NoSuchNodeError:
            scan_parameter_names = None
        progress_bar = progressbar.ProgressBar(widgets=[
            '',
            progressbar.Percentage(), ' ',
            progressbar.Bar(marker='*', left='|', right='|'), ' ',
            progressbar.AdaptiveETA()
        ],
                                               maxval=meta_data.shape[0],
                                               term_width=80)
        progress_bar.start()

        for i in range(n_readouts):

            # Raw data indices of the readout
            i_start = meta_data['index_start'][i]
            i_stop = meta_data['index_stop'][i]

            # Time stamps of readout
            t_stop = meta_data[i]['timestamp_stop']
            t_start = meta_data[i]['timestamp_start']

            # Create data of readout (raw data + meta data)
            data = []
            data.append(raw_data[i_start:i_stop])
            data.extend(
                (float(t_start), float(t_stop), int(meta_data[i]['error'])))
            #             scan_par_id = int(meta_data[i]['scan_param_id'])

            send_data(socket, data)

            if i == 0:  # Initialize on first readout
                last_timestamp_start = t_start
            now = time.time()
            #             if now - start > 180:
            #                 break
            delay = now - last_readout_time
            additional_delay = t_start - last_timestamp_start - delay
            if additional_delay > 0:
                # Wait if send too fast, especially needed when readout was
                # stopped during data taking (e.g. for mask shifting)
                time.sleep(additional_delay)
            last_readout_time = time.time()
            last_timestamp_start = t_start
            time.sleep(meta_data[i]['timestamp_stop'] -
                       meta_data[i]['timestamp_start'])
            progress_bar.update(i)
        progress_bar.finish()
    socket.close()
    context.term()
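
For completeness, a minimal sketch of a receiving side for the PUSH socket above. send_data is not shown in this snippet, so the framing below is an assumption (illustrated with pyzmq's recv_pyobj, which pairs with send_pyobj on the sender); the address is hypothetical and must match socket_addr:

import zmq

context = zmq.Context()
socket = context.socket(zmq.PULL)
socket.bind('tcp://127.0.0.1:5678')  # hypothetical; must match the sender's socket_addr

while True:
    data = socket.recv_pyobj()  # assumes the sender pushed with send_pyobj
    raw_data, t_start, t_stop, error = data
    print('readout with %d words, error flag %d' % (len(raw_data), error))
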