Code Example #1
    def analyze(self):
        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        with tb.open_file(h5_filename, 'r+') as h5_file:
            raw_data = h5_file.root.raw_data[:]
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]

            # TODO: TMP this should go to analysis function with chunking
            self.logger.info('Interpret raw data...')
            hit_data = analysis.interpret_raw_data(raw_data, meta_data)
            Vthreshold_start = [int(item[1]) for item in run_config if item[0] == 'Vthreshold_start'][0]
            Vthreshold_stop = [int(item[1]) for item in run_config if item[0] == 'Vthreshold_stop'][0]
            n_injections = [int(item[1]) for item in run_config if item[0] == 'n_injections'][0]

            hit_data = hit_data[hit_data['data_header'] == 1]
            param_range = np.unique(meta_data['scan_param_id'])
            
            self.logger.info('Get the global threshold distributions for all pixels...')
            scurve = analysis.scurve_hist(hit_data, param_range)
            self.logger.info('Fit the scurves for all pixels...')
            thr2D, sig2D, chi2ndf2D = analysis.fit_scurves_multithread(scurve, scan_param_range=range(Vthreshold_start, Vthreshold_stop), n_injections=n_injections, invert_x=True)

            h5_file.create_group(h5_file.root, 'interpreted', 'Interpreted Data')

            h5_file.create_table(h5_file.root.interpreted, 'hit_data', hit_data, filters=tb.Filters(complib='zlib', complevel=5))
            h5_file.create_carray(h5_file.root.interpreted, name='HitDistribution', obj=scurve)
            h5_file.create_carray(h5_file.root.interpreted, name='PixelThresholdMap', obj=thr2D.T)
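
Note: the repeated [int(item[1]) for item in run_config if item[0] == '...'][0] pattern above just looks up one named value in the run_config table read from the HDF5 file (later examples store the attribute names as bytes, e.g. b'Vthreshold_start'). A minimal helper sketch of the same lookup; the name get_run_config_value is illustrative and not part of tpx3-daq:

def get_run_config_value(run_config, name, cast=int):
    # run_config rows are (attribute, value, ...) entries read from
    # h5_file.root.configuration.run_config; return the first value whose
    # attribute field matches name, converted with cast
    matches = [row[1] for row in run_config if row[0] == name]
    if not matches:
        raise KeyError('%s not found in run_config' % name)
    return cast(matches[0])

# Equivalent to the list comprehensions above:
# Vthreshold_start = get_run_config_value(run_config, 'Vthreshold_start')
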
Code Example #2
    def analyze(self):
        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        with tb.open_file(h5_filename, 'r+') as h5_file:
            raw_data = h5_file.root.raw_data[:]
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]

            # TODO: TMP this should go to analysis function with chunking
            hit_data = analysis.interpret_raw_data(raw_data, meta_data)
            hit_data = hit_data[hit_data['data_header'] == 1]
            param_range = np.unique(meta_data['scan_param_id'])
            scurve = analysis.scurve_hist(hit_data, param_range)

            n_injections = [int(item[1]) for item in run_config if item[0] == 'n_injections'][0]
            VTP_fine_start = [int(item[1]) for item in run_config if item[0] == 'VTP_fine_start'][0]
            VTP_fine_stop = [int(item[1]) for item in run_config if item[0] == 'VTP_fine_stop'][0]

            param_range = range(VTP_fine_start, VTP_fine_stop)
            thr2D, sig2D, chi2ndf2D = analysis.fit_scurves_multithread(scurve, scan_param_range=param_range, n_injections=n_injections)

            h5_file.create_group(h5_file.root, 'interpreted', 'Interpreted Data')

            h5_file.create_table(h5_file.root.interpreted, 'hit_data', hit_data, filters=tb.Filters(complib='zlib', complevel=5))

            h5_file.create_carray(h5_file.root.interpreted, name='HistSCurve', obj=scurve)
            h5_file.create_carray(h5_file.root.interpreted, name='Chi2Map', obj=chi2ndf2D.T)
            h5_file.create_carray(h5_file.root.interpreted, name='ThresholdMap', obj=thr2D.T)
            h5_file.create_carray(h5_file.root.interpreted, name='NoiseMap', obj=sig2D.T)

            pix_occ = np.bincount(hit_data['x'] * 256 + hit_data['y'], minlength=256 * 256).astype(np.uint32)
            hist_occ = np.reshape(pix_occ, (256, 256)).T
            h5_file.create_carray(h5_file.root.interpreted, name='HistOcc', obj=hist_occ)
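
The last three lines above build the per-pixel occupancy map: each hit is flattened to a single index x * 256 + y, counted with np.bincount, and folded back into the 256 x 256 pixel matrix. A self-contained sketch of that histogramming step on made-up hit data (field names as in the examples):

import numpy as np

# Made-up hits with the same field names as hit_data above
hits = np.zeros(3, dtype=[('x', np.uint16), ('y', np.uint16)])
hits['x'] = [0, 0, 255]
hits['y'] = [0, 0, 255]

# Flatten (x, y) to one index per pixel, count hits and reshape to 256x256
pix_occ = np.bincount(hits['x'].astype(np.uint32) * 256 + hits['y'],
                      minlength=256 * 256).astype(np.uint32)
hist_occ = np.reshape(pix_occ, (256, 256)).T

print(hist_occ[0, 0], hist_occ[255, 255])   # -> 2 1
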
Code Example #3
    def analyze(self, progress = None, status = None, **kwargs):
        '''
            Analyze the data of the scan
            If progress is None a tqdm progress bar is used, otherwise progress should be a multiprocessing Queue which stores the progress as a fraction of 1.
            If a status queue is given, information about the status of the scan is put into it.
        '''

        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        if status is not None:
            status.put("Performing data analysis")

        # Open the HDF5 which contains all data of the scan
        with tb.open_file(h5_filename, 'r+') as h5_file:
            # Read raw data, meta data and configuration parameters
            raw_data = h5_file.root.raw_data[:]
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]
            general_config = h5_file.root.configuration.generalConfig[:]
            op_mode = [row[1] for row in general_config if row[0]==b'Op_mode'][0]
            vco = [row[1] for row in general_config if row[0]==b'Fast_Io_en'][0]

            # Create group to save all data and histograms to the HDF file
            h5_file.create_group(h5_file.root, 'interpreted', 'Interpreted Data')

            self.logger.info('Interpret raw data...')
            # Interpret the raw data (2x 32 bit to 1x 48 bit)
            hit_data = analysis.interpret_raw_data(raw_data, op_mode, vco, meta_data, progress = progress)
            raw_data = None

            # Select only data which is hit data
            hit_data = hit_data[hit_data['data_header'] == 1]
            h5_file.create_table(h5_file.root.interpreted, 'hit_data', hit_data, filters=tb.Filters(complib='zlib', complevel=5))
            pix_occ = np.bincount(hit_data['x'] * 256 + hit_data['y'], minlength=256 * 256).astype(np.uint32)
            hist_occ = np.reshape(pix_occ, (256, 256)).T
            h5_file.create_carray(h5_file.root.interpreted, name='HistOcc', obj=hist_occ)
            param_range = np.unique(meta_data['scan_param_id'])
            meta_data = None
            pix_occ = None
            hist_occ = None

            # Create histograms for number of detected hits for individual thresholds
            scurve = analysis.scurve_hist(hit_data, param_range)
            hit_data = None

            # Read needed configuration parameters
            n_injections = [int(item[1]) for item in run_config if item[0] == b'n_injections'][0]
            Vthreshold_start = [int(item[1]) for item in run_config if item[0] == b'Vthreshold_start'][0]
            Vthreshold_stop = [int(item[1]) for item in run_config if item[0] == b'Vthreshold_stop'][0]

            # Fit S-Curves to the histograms for all pixels
            param_range = list(range(Vthreshold_start, Vthreshold_stop))
            thr2D, sig2D, chi2ndf2D = analysis.fit_scurves_multithread(scurve, scan_param_range=param_range, n_injections=n_injections, invert_x=True, progress = progress)

            h5_file.create_carray(h5_file.root.interpreted, name='HistSCurve', obj=scurve)
            h5_file.create_carray(h5_file.root.interpreted, name='Chi2Map', obj=chi2ndf2D.T)
            h5_file.create_carray(h5_file.root.interpreted, name='ThresholdMap', obj=thr2D.T)
            h5_file.create_carray(h5_file.root.interpreted, name='NoiseMap', obj=sig2D.T)
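
The progress/status convention described in the docstring above is shared by all analyze() variants on this page: both arguments are multiprocessing queues that a caller (for example a GUI process) can drain while the analysis runs in a worker process. A minimal, self-contained sketch of that convention with a stand-in analyze function (the stand-in is illustrative, not code from tpx3-daq):

from multiprocessing import Process, Queue
import time

def analyze(progress=None, status=None):
    # Stand-in for the analyze() methods shown on this page
    if status is not None:
        status.put("Performing data analysis")
    n_steps = 10
    for step in range(n_steps):
        time.sleep(0.05)                               # pretend to do some work
        if progress is not None:
            progress.put((step + 1) / float(n_steps))  # progress as fraction of 1

if __name__ == '__main__':
    progress, status = Queue(), Queue()
    worker = Process(target=analyze, kwargs={'progress': progress, 'status': status})
    worker.start()
    worker.join()
    while not status.empty():
        print('status:', status.get())
    while not progress.empty():
        print('progress: %.0f %%' % (100 * progress.get()))
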
Code Example #4
    def analyze(self):
        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        with tb.open_file(h5_filename, 'r+') as h5_file:
            raw_data = h5_file.root.raw_data[:]
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]

            # TODO: TMP this should go to analysis function with chunking
            self.logger.info('Interpret raw data...')
            hit_data = analysis.interpret_raw_data(raw_data, meta_data)
            print(hit_data)
            Vthreshold_start = [
                int(item[1]) for item in run_config
                if item[0] == 'Vthreshold_start'
            ][0]
            Vthreshold_stop = [
                int(item[1]) for item in run_config
                if item[0] == 'Vthreshold_stop'
            ][0]

            hit_data = hit_data[hit_data['data_header'] == 1]
            print(hit_data)
            param_range = np.unique(meta_data['scan_param_id'])

            self.logger.info(
                'Get the global threshold distributions for all pixels...')
            scurve = analysis.scurve_hist(hit_data, param_range)
            self.logger.info(
                'Calculate the mean of the global threshold distributions for all pixels...'
            )
            vths = analysis.vths(scurve, param_range, Vthreshold_start)

            h5_file.create_group(h5_file.root, 'interpreted',
                                 'Interpreted Data')

            h5_file.create_table(h5_file.root.interpreted,
                                 'hit_data',
                                 hit_data,
                                 filters=tb.Filters(complib='zlib',
                                                    complevel=5))
            h5_file.create_carray(h5_file.root.interpreted,
                                  name='HitDistribution',
                                  obj=scurve)
            h5_file.create_carray(h5_file.root.interpreted,
                                  name='PixelThresholdMap',
                                  obj=vths.T)
Code Example #5
File: tune_noise.py  Project: solitonium/tpx3-daq
        def take_data():
            self.fifo_readout.reset_rx()
            self.fifo_readout.enable_rx(True)
            time.sleep(0.2)

            mask_step_cmd = []
            for i in range(256 // 4):
                mask_step_cmd.append(
                    self.chip.write_pcr(range(4 * i, 4 * i + 4), write=False))
            mask_step_cmd.append(self.chip.read_pixel_matrix_datadriven())
            self.chip.write(mask_step_cmd)

            stop_cmd = []
            stop_cmd.append(self.chip.stop_readout(write=False))
            #stop_cmd.append(self.chip.set_dac("Vthreshold_coarse", 15, write=False))
            stop_cmd.append(self.chip.reset_sequential(write=False))

            with self.readout(scan_param_id=1,
                              fill_buffer=True,
                              clear_buffer=True):
                time.sleep(0.1)
                #self.chip.set_dac("Vthreshold_coarse", 6)
                time.sleep(0.01)
                with self.shutter():
                    time.sleep(wait_time)
                self.chip.write(stop_cmd)
                time.sleep(0.1)

            dqdata = self.fifo_readout.data
            raw_data = np.concatenate([item[0] for item in dqdata])
            error = (len(raw_data) % 2 != 0)

            hit_data = analysis.interpret_raw_data(raw_data)

            error |= (self.chip['RX'].LOST_DATA_COUNTER > 0)
            error |= (self.chip['RX'].DECODER_ERROR_COUNTER > 0)
            if error:
                self.logger.error('DATA ERROR')

            hit_data = hit_data[hit_data['data_header'] == 1]
            #self.logger.info('raw_data = %d, hit_data = %d' % (len(raw_data), len(hit_data)))

            bc = np.bincount(hit_data['x'].astype(np.uint16) * 256 +
                             hit_data['y'],
                             minlength=256 * 256)
            hist_occ = np.reshape(bc, (256, 256))
            return hist_occ
Code Example #6
File: tpx3_inter.py  Project: solitonium/tpx3-daq
    def interpret_data(self, data):
        ''' Called for every chunk received '''

        if isinstance(data[0][1], dict):  # Meta data
            return self._interpret_meta_data(data)

        raw_data = data[0][1]
        hit_data = analysis.interpret_raw_data(data[0][1])
        hit_data = hit_data[hit_data['data_header'] == 1]

        pix_occ = np.bincount(hit_data['x'] * 256 + hit_data['y'], minlength=256*256).astype(np.uint32)
        hist_occ = np.reshape(pix_occ, (256, 256))


        hit_count = np.count_nonzero(hist_occ.flat)
        self.total_hits += len(hit_data)
        self.readout += 1
        self.total_events = self.readout  # ???

        if hit_count > 1: #cut noise
            self.hist_hit_count[hit_count] += 1
            self.hist_occ += hist_occ


        #TODO: self.hist_tot ...
        interpreted_data = {
            #'hits': hit_data,
            'occupancy': self.hist_occ,
            'tot_hist': self.hist_tot,
            'hist_hit_count': self.hist_hit_count,
            'hist_event_status': []
        }

        if self.int_readouts != 0:  # = 0 for infinite integration
            if self.readout % self.int_readouts == 0:
                self.reset_hists()

        return [interpreted_data]
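
interpret_data above accumulates a running occupancy histogram and resets it every int_readouts readouts (int_readouts = 0 meaning infinite integration). A tiny stand-alone sketch of just that integration-window logic (the class name is made up for illustration):

import numpy as np

class OccupancyIntegrator(object):
    # Illustrative stand-in for the reset logic in interpret_data above
    def __init__(self, int_readouts=0):
        self.int_readouts = int_readouts      # 0 = integrate forever
        self.readout = 0
        self.hist_occ = np.zeros((256, 256), dtype=np.uint32)

    def add(self, hist_occ_chunk):
        # hist_occ_chunk: uint32 256x256 occupancy of one readout
        self.readout += 1
        self.hist_occ += hist_occ_chunk
        if self.int_readouts != 0 and self.readout % self.int_readouts == 0:
            self.hist_occ[:] = 0              # start a new integration window
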
Code Example #7
    def analyze(self, progress=None, status=None, result_path=None, **kwargs):
        '''
            Analyze the data of the equalisation and calculate the equalisation matrix
            If progress is None a tqdm progress bar is used, otherwise progress should be a multiprocessing Queue which stores the progress as a fraction of 1.
            If a status queue is given, information about the status of the scan is put into it.
        '''

        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        if status is not None:
            status.put("Performing data analysis")

        # Open the HDF5 which contains all data of the equalisation
        with tb.open_file(h5_filename, 'r+') as h5_file:
            # Read raw data, meta data and configuration parameters
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]
            general_config = h5_file.root.configuration.generalConfig[:]
            op_mode = [
                row[1] for row in general_config if row[0] == b'Op_mode'
            ][0]
            vco = [
                row[1] for row in general_config if row[0] == b'Fast_Io_en'
            ][0]

            self.logger.info('Interpret raw data...')

            # THR = 0
            param_range, index = np.unique(meta_data['scan_param_id'],
                                           return_index=True)
            meta_data_th0 = meta_data[
                meta_data['scan_param_id'] < len(param_range) // 2]
            param_range_th0 = np.unique(meta_data_th0['scan_param_id'])

            # THR = 15
            meta_data_th15 = meta_data[
                meta_data['scan_param_id'] >= len(param_range) // 2]
            param_range_th15 = np.unique(meta_data_th15['scan_param_id'])

            # shift indices so that they start with zero
            start = meta_data_th15['index_start'][0]
            meta_data_th15[
                'index_start'] = meta_data_th15['index_start'] - start
            meta_data_th15['index_stop'] = meta_data_th15['index_stop'] - start

            self.logger.info('THR = 0')
            #THR = 0
            raw_data_thr0 = h5_file.root.raw_data[:meta_data_th0['index_stop']
                                                  [-1]]
            hit_data_thr0 = analysis.interpret_raw_data(raw_data_thr0,
                                                        op_mode,
                                                        vco,
                                                        meta_data_th0,
                                                        progress=progress)
            raw_data_thr0 = None

            self.logger.info('THR = 15')
            #THR = 15
            raw_data_thr15 = h5_file.root.raw_data[
                meta_data_th0['index_stop'][-1]:]
            hit_data_thr15 = analysis.interpret_raw_data(raw_data_thr15,
                                                         op_mode,
                                                         vco,
                                                         meta_data_th15,
                                                         progress=progress)
            raw_data_thr15 = None

        # Read needed configuration parameters
        Vthreshold_start = [
            int(item[1]) for item in run_config
            if item[0] == b'Vthreshold_start'
        ][0]
        Vthreshold_stop = [
            int(item[1]) for item in run_config
            if item[0] == b'Vthreshold_stop'
        ][0]
        chip_wafer = [
            int(item[1]) for item in run_config if item[0] == b'chip_wafer'
        ][0]
        chip_x = [
            item[1].decode() for item in run_config if item[0] == b'chip_x'
        ][0]
        chip_y = [int(item[1]) for item in run_config
                  if item[0] == b'chip_y'][0]

        # Select only data which is hit data
        hit_data_thr0 = hit_data_thr0[hit_data_thr0['data_header'] == 1]
        hit_data_thr15 = hit_data_thr15[hit_data_thr15['data_header'] == 1]

        # Divide the data into two parts - data for pixel threshold 0 and 15
        param_range = np.unique(meta_data['scan_param_id'])
        meta_data = None
        param_range_th0 = np.unique(hit_data_thr0['scan_param_id'])
        param_range_th15 = np.unique(hit_data_thr15['scan_param_id'])

        # Create histograms for number of detected hits for individual thresholds
        self.logger.info(
            'Get the global threshold distributions for all pixels...')
        scurve_th0 = analysis.scurve_hist(hit_data_thr0, param_range_th0)
        hit_data_thr0 = None
        scurve_th15 = analysis.scurve_hist(hit_data_thr15, param_range_th15)
        hit_data_thr15 = None

        # Calculate the mean of the threshold distributions for all pixels
        self.logger.info(
            'Calculate the mean of the global threshold distributions for all pixels...'
        )
        vths_th0 = analysis.vths(scurve_th0, param_range_th0, Vthreshold_start)
        scurve_th0 = None
        vths_th15 = analysis.vths(scurve_th15, param_range_th15,
                                  Vthreshold_start)
        scurve_th15 = None

        # Get the threshold distributions for both scans
        self.logger.info('Get the cumulated global threshold distributions...')
        hist_th0 = analysis.vth_hist(vths_th0, Vthreshold_stop)
        hist_th15 = analysis.vth_hist(vths_th15, Vthreshold_stop)
        vths_th15 = None

        # Use the threshold histograms and one threshold distribution to calculate the equalisation
        self.logger.info('Calculate the equalisation matrix...')
        eq_matrix = analysis.eq_matrix(hist_th0, hist_th15, vths_th0,
                                       Vthreshold_start, Vthreshold_stop)

        # Don't mask any pixels in the mask file
        mask_matrix = np.zeros((256, 256), dtype=bool)
        mask_matrix[:, :] = 0

        # Write the equalisation matrix to a new HDF5 file
        self.save_thr_mask(eq_matrix, chip_wafer, chip_x, chip_y)

        if result_path is not None:
            result_path.put(self.thrfile)
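
The block above splits the equalisation scan into its pixel-threshold-0 and pixel-threshold-15 halves via scan_param_id and shifts index_start/index_stop of the second half so that they are valid for the raw-data slice starting at meta_data_th0['index_stop'][-1]. A small self-contained sketch of just that index bookkeeping on made-up meta data:

import numpy as np

meta_data = np.zeros(4, dtype=[('scan_param_id', np.uint32),
                               ('index_start', np.uint64),
                               ('index_stop', np.uint64)])
meta_data['scan_param_id'] = [0, 1, 2, 3]        # first half: ids 0-1, second half: ids 2-3
meta_data['index_start'] = [0, 100, 200, 300]
meta_data['index_stop'] = [100, 200, 300, 400]

param_range = np.unique(meta_data['scan_param_id'])
meta_data_th0 = meta_data[meta_data['scan_param_id'] < len(param_range) // 2]
meta_data_th15 = meta_data[meta_data['scan_param_id'] >= len(param_range) // 2]

# raw data of the second half is read starting at meta_data_th0['index_stop'][-1],
# so the indices of the second half have to be shifted to start at zero
start = meta_data_th15['index_start'][0]
meta_data_th15['index_start'] = meta_data_th15['index_start'] - start
meta_data_th15['index_stop'] = meta_data_th15['index_stop'] - start

print(meta_data_th15['index_start'], meta_data_th15['index_stop'])   # -> [0 100] [100 200]
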
Code Example #8
File: ToT_calib.py  Project: SiLab-Bonn/tpx3-daq
    def analyze(self, progress=None, status=None, **kwargs):
        '''
            Analyze the data of the scan
            If progress is None a tqdm progress bar is used, otherwise progress should be a multiprocessing Queue which stores the progress as a fraction of 1.
            If a status queue is given, information about the status of the scan is put into it.
        '''
        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        if status is not None:
            status.put("Performing data analysis")

        # Open the HDF5 which contains all data of the calibration
        with tb.open_file(h5_filename, 'r+') as h5_file:
            # Read raw data, meta data and configuration parameters
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]
            general_config = h5_file.root.configuration.generalConfig[:]
            op_mode = [
                row[1] for row in general_config if row[0] == b'Op_mode'
            ][0]
            vco = [
                row[1] for row in general_config if row[0] == b'Fast_Io_en'
            ][0]

            # Create group to save all data and histograms to the HDF file
            try:
                h5_file.remove_node(h5_file.root.interpreted, recursive=True)
            except tb.NoSuchNodeError:
                pass

            h5_file.create_group(h5_file.root, 'interpreted',
                                 'Interpreted Data')

            self.logger.info('Interpret raw data...')
            param_range = np.unique(meta_data['scan_param_id'])

            # Create arrays for interpreted data for all scan parameter IDs
            totcurves_means = np.zeros((256 * 256, len(param_range)),
                                       dtype=np.uint16)
            totcurves_hits = np.zeros((256 * 256, len(param_range)),
                                      dtype=np.uint16)

            if progress is None:
                pbar = tqdm(total=len(param_range))
            else:
                step_counter = 0

            # Interpret data separately per scan parameter id to save RAM
            for param_id in param_range:
                start_index = meta_data[meta_data['scan_param_id'] == param_id]
                stop_index = meta_data[meta_data['scan_param_id'] == param_id]
                # Interpret the raw data (2x 32 bit to 1x 48 bit)
                raw_data_tmp = h5_file.root.raw_data[
                    start_index['index_start'][0]:stop_index['index_stop'][-1]]
                hit_data_tmp = analysis.interpret_raw_data(raw_data_tmp,
                                                           op_mode,
                                                           vco,
                                                           progress=progress)
                raw_data_tmp = None

                # Select only data which is hit data
                hit_data_tmp = hit_data_tmp[hit_data_tmp['data_header'] == 1]

                # Create histograms for number of detected ToT clock cycles for individual testpulses
                full_tmp, count_tmp = analysis.totcurve_hist(hit_data_tmp)

                # Put results of current scan parameter ID in overall arrays
                totcurves_means[:, param_id] = full_tmp
                full_tmp = None
                totcurves_hits[:, param_id] = count_tmp
                count_tmp = None
                hit_data_tmp = None

                if progress is None:
                    pbar.update(1)
                else:
                    step_counter += 1
                    fraction = step_counter / (len(param_range))
                    progress.put(fraction)

            if progress is None:
                pbar.close()

            meta_data = None

            # Calculate the mean ToT per pixel per pulse
            totcurve = np.divide(totcurves_means,
                                 totcurves_hits,
                                 where=totcurves_hits > 0)
            totcurve = np.nan_to_num(totcurve)

            # Only use pixels which saw exactly all pulses (hard-coded to 10 injections here)
            totcurve[totcurves_hits != 10] = 0
            hit_data = None

            # Read needed configuration parameters
            VTP_fine_start = [
                int(item[1]) for item in run_config
                if item[0] == b'VTP_fine_start'
            ][0]
            VTP_fine_stop = [
                int(item[1]) for item in run_config
                if item[0] == b'VTP_fine_stop'
            ][0]

            # Fit ToT-Curves to the histograms for all pixels
            param_range = list(range(VTP_fine_start, VTP_fine_stop))

            h5_file.create_carray(h5_file.root.interpreted,
                                  name='HistToTCurve',
                                  obj=totcurve)
            h5_file.create_carray(h5_file.root.interpreted,
                                  name='HistToTCurve_Full',
                                  obj=totcurves_means)
            h5_file.create_carray(h5_file.root.interpreted,
                                  name='HistToTCurve_Count',
                                  obj=totcurves_hits)

            mean, popt, pcov = analysis.fit_totcurves_mean(
                totcurve, scan_param_range=param_range, progress=progress)

            h5_file.create_table(h5_file.root.interpreted, 'mean_curve', mean)

            data_type = {
                'names': ['param', 'value', 'stddev'],
                'formats': ['S1', 'float32', 'float32']
            }

            parameter_table = np.recarray(4, dtype=data_type)
            parameter_table['param'] = ['a', 'b', 'c', 't']
            parameter_table['value'] = [popt[0], popt[1], popt[2], popt[3]]
            parameter_table['stddev'] = [
                np.sqrt(pcov[0][0]),
                np.sqrt(pcov[1][1]),
                np.sqrt(pcov[2][2]),
                np.sqrt(pcov[3][3])
            ]

            h5_file.create_table(h5_file.root.interpreted, 'fit_params',
                                 parameter_table)
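
A note on the mean ToT calculation above: np.divide(..., where=totcurves_hits > 0) skips the division for pixels that saw no hits, but without an explicit out= array the skipped entries are left uninitialized; the example relies on the later totcurve[totcurves_hits != 10] = 0 to clean them up. A minimal sketch of the variant with a preallocated output array:

import numpy as np

means = np.array([[10, 0], [30, 6]], dtype=np.uint16)   # summed ToT per pixel
hits = np.array([[2, 0], [10, 3]], dtype=np.uint16)     # pulses seen per pixel

# Divide only where hits > 0; the remaining entries keep the 0 written via out=
mean_tot = np.divide(means, hits,
                     out=np.zeros(means.shape, dtype=np.float64),
                     where=hits > 0)
print(mean_tot)   # -> [[5. 0.] [3. 2.]]
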
Code Example #9
File: equalisation.py  Project: solitonium/tpx3-daq
    def analyze(self):
        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        with tb.open_file(h5_filename, 'r+') as h5_file:
            raw_data = h5_file.root.raw_data[:]
            meta_data = h5_file.root.meta_data[:]
            run_config = h5_file.root.configuration.run_config[:]

            # TODO: TMP this should go to analysis function with chunking
            #print('haeder1\t header2\t y\t x\t Hits\t Counter')
            self.logger.info('Interpret raw data...')
            hit_data = analysis.interpret_raw_data(raw_data, meta_data)
            Vthreshold_start = [int(item[1]) for item in run_config if item[0] == 'Vthreshold_start'][0]
            Vthreshold_stop = [int(item[1]) for item in run_config if item[0] == 'Vthreshold_stop'][0]

            hit_data = hit_data[hit_data['data_header'] == 1]
            param_range = np.unique(meta_data['scan_param_id'])
            hit_data_th0 = hit_data[hit_data['scan_param_id'] < len(param_range) / 2]
            param_range_th0 = np.unique(hit_data_th0['scan_param_id'])
            hit_data_th15 = hit_data[hit_data['scan_param_id'] >= len(param_range) / 2]
            param_range_th15 = np.unique(hit_data_th15['scan_param_id'])
            
            self.logger.info('Get the global threshold distributions for all pixels...')
            scurve_th0 = analysis.scurve_hist(hit_data_th0, param_range_th0)
            scurve_th15 = analysis.scurve_hist(hit_data_th15, param_range_th15)
            self.logger.info('Calculate the mean of the global threshold distributions for all pixels...')
            vths_th0 = analysis.vths(scurve_th0, param_range_th0, Vthreshold_start)
            vths_th15 = analysis.vths(scurve_th15, param_range_th15, Vthreshold_start)
            self.logger.info('Get the cumulated global threshold distributions...')
            hist_th0 = analysis.vth_hist(vths_th0, Vthreshold_stop)
            hist_th15 = analysis.vth_hist(vths_th15, Vthreshold_stop)

            self.logger.info('Calculate the equalisation matrix...')
            eq_matrix = analysis.eq_matrix(hist_th0, hist_th15, vths_th0, Vthreshold_start, Vthreshold_stop)
            mask_matrix = np.zeros((256, 256), dtype=bool)
            mask_matrix[:, :] = 0

            self.logger.info('Writing mask_matrix to file...')
            maskfile = os.path.join(self.working_dir, self.timestamp + '_mask.h5')

            with tb.open_file(maskfile, 'a') as out_file:
                try:
                    out_file.remove_node(out_file.root.mask_matrix)
                except NoSuchNodeError:
                    self.logger.debug('Specified maskfile does not include a mask_matrix yet!')

                out_file.create_carray(out_file.root,
                                    name='mask_matrix',
                                    title='Matrix mask',
                                    obj=mask_matrix)
                self.logger.info('Closing mask file: %s' % (maskfile))

            self.logger.info('Writing equalisation matrix to file...')
            with tb.open_file(maskfile, 'a') as out_file:
                try:
                    out_file.remove_node(out_file.root.thr_matrix)
                except NoSuchNodeError:
                    self.logger.debug('Specified maskfile does not include a thr_mask yet!')

                out_file.create_carray(out_file.root,
                                        name='thr_matrix',
                                        title='Matrix Threshold',
                                        obj=eq_matrix)
                self.logger.info('Closing equalisation matrix file: %s' % (maskfile))
Code Example #10
    def analyze(self,
                file_name,
                args_dict,
                cluster_radius=1.1,
                cluster_dt=5,
                progress=None):

        big = args_dict["big"]
        new_file = args_dict["new_file"]

        self.logger.info('Starting data analysis of ' + str(file_name) +
                         ' ...')

        if new_file:
            output_filename = self.create_output_file(file_name)
        else:
            output_filename = file_name

        #if file_name != "":
        self.h5_filename = file_name
        self.h5_filename_out = output_filename
        file_extension = file_name.split('/')[-1]
        #with tb.open_file(self.h5_filename, 'r+') as h5_file_in:
        h5_file_in = tb.open_file(self.h5_filename, 'r+')
        meta_data = h5_file_in.root.meta_data[:]
        run_config = h5_file_in.root.configuration.run_config[:]
        general_config = h5_file_in.root.configuration.generalConfig[:]
        op_mode = [row[1] for row in general_config if row[0] == b'Op_mode'][0]
        #vco = [row[1] for row in general_config if row[0]==b'Fast_Io_en'][0]
        vco = False

        with tb.open_file(self.h5_filename_out, 'r+') as h5_file:

            # create structures to write the hit_data and cluster data in
            try:
                h5_file.remove_node(h5_file.root.interpreted, recursive=True)
                print("Node interpreted already there")
            except tb.NoSuchNodeError:
                print("Create node interpreted")

            h5_file.create_group(h5_file.root, 'interpreted',
                                 'Interpreted Data')

            try:
                h5_file.remove_node(h5_file.root.reconstruction,
                                    recursive=True)
                print("Node reconstruction already there")
            except tb.NoSuchNodeError:
                print("Create node reconstruction")

            h5_file.create_group(h5_file.root, 'reconstruction',
                                 'Reconstructed Data')

            # for large data_sets we might want to split it into smaller parts to speed up analysis and save RAM
            if big == True:
                # customize number of meta data chunks to be analyzed at once here
                chunk_length = 3000
                meta_length = len(meta_data)

                # array of indices of the meta_data chunks each package of chunks begins with
                iteration_array = range(0, meta_length, chunk_length)
            # for smaller data we just analyse everything at once -> only one set of chunks, involving all data
            else:
                iteration_array = [0]

            cluster_sum = 0
            cluster_sum_g1 = 0
            hit_sum = 0
            hit_sum_b = 0

            hit_index = 0
            # iterate over all sets of chunks
            for num, i in enumerate(iteration_array):
                # Split meta_data
                if big == False:  # take all data
                    self.logger.info("Start analysis of part 1/1")
                    meta_data_tmp = meta_data[:]
                elif i < meta_length - chunk_length:  # take all data in chunks
                    self.logger.info(
                        "Start analysis of part %d/%d" %
                        (num + 1, math.ceil(meta_length / chunk_length)))
                    meta_data_tmp = meta_data[i:i + chunk_length]
                else:  # take all data until the end
                    self.logger.info(
                        "Start analysis of part %d/%d" %
                        (num + 1, math.ceil(meta_length / chunk_length)))
                    meta_data_tmp = meta_data[i:]
                # get raw_data
                raw_data_tmp = h5_file_in.root.raw_data[meta_data_tmp[
                    'index_start'][0]:meta_data_tmp['index_stop'][-1]]
                # shift indices in meta_data to start at zero
                start = meta_data_tmp['index_start'][0]
                meta_data_tmp[
                    'index_start'] = meta_data_tmp['index_start'] - start
                meta_data_tmp[
                    'index_stop'] = meta_data_tmp['index_stop'] - start
                # analyze data
                hit_data_tmp = analysis.interpret_raw_data(raw_data_tmp,
                                                           op_mode,
                                                           vco,
                                                           meta_data_tmp,
                                                           split_fine=True)

                print(hit_data_tmp.shape[0])
                if hit_data_tmp.shape[0] != 0:
                    hit_data_tmp = hit_data_tmp[hit_data_tmp['data_header'] ==
                                                1]
                    hit_data_tmp['hit_index'] = range(
                        hit_index, hit_index + hit_data_tmp.shape[0])
                    hit_index += hit_data_tmp.shape[0]

                    # cluster data
                    self.logger.info("Start clustering...")
                    cluster_data = self.cluster(hit_data_tmp, cluster_radius,
                                                cluster_dt)
                    self.logger.info("Done with clustering.")

                    # save hit_data
                    h5_file.create_table(h5_file.root.interpreted,
                                         'hit_data_' + str(num),
                                         hit_data_tmp,
                                         filters=tb.Filters(complib='zlib',
                                                            complevel=5))

                    # create group for cluster data
                    group = h5_file.create_group(
                        h5_file.root.reconstruction, 'run_' + str(num),
                        'Cluster Data of Chunk ' + str(num))

                    # write cluster data into h5 file
                    self.logger.info("Start writing into h5 file...")
                    vlarray = h5_file.create_vlarray(group,
                                                     'x',
                                                     tb.Int32Atom(shape=()),
                                                     "x-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['x'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'y',
                                                     tb.Int32Atom(shape=()),
                                                     "y-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['y'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'TOA',
                                                     tb.Int64Atom(shape=()),
                                                     "TOA-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['TOA'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'TOT',
                                                     tb.Int32Atom(shape=()),
                                                     "TOT-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['TOT'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'EventCounter',
                                                     tb.Int32Atom(shape=()),
                                                     "EventCounter-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['EventCounter'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'TOA_Extension',
                                                     tb.Int64Atom(shape=()),
                                                     "TOA_Extension-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['TOA_Extension'][i])

                    vlarray = h5_file.create_vlarray(group,
                                                     'hit_index',
                                                     tb.Int64Atom(shape=()),
                                                     "hit_index-values",
                                                     filters=tb.Filters(
                                                         complib='zlib',
                                                         complevel=5))
                    for i in range(cluster_data.shape[0]):
                        vlarray.append(cluster_data['hit_index'][i])

                    vlarray = h5_file.create_array(group, 'cluster_nr',
                                                   cluster_data['cluster_nr'],
                                                   "cluster_nr-values")

                    h5_file.create_array(group, 'chunk_start_time',
                                         cluster_data['chunk_start_time'],
                                         "chunk_start_time-values")

                    h5_file.create_array(group, 'hits', cluster_data['hits'],
                                         "size of cluster")

                    h5_file.create_array(group, 'centerX',
                                         cluster_data['centerX'],
                                         "mean of the x values")

                    h5_file.create_array(group, 'centerY',
                                         cluster_data['centerY'],
                                         "mean of the y values")

                    h5_file.create_array(group, 'sumTOT',
                                         cluster_data['sumTOT'],
                                         "sum of the ToT in the cluster")

                    # print out cluster information
                    print("# cluster in chunk: " +
                          str(len(cluster_data['hits'])))
                    if len(cluster_data['hits']) != 0:
                        print("average size: " +
                              str(np.mean(cluster_data['hits'])))
                    print("total hits in chunk: " +
                          str(np.sum(cluster_data['hits'])))

                    cluster_sum += len(cluster_data['hits'])
                    cluster_sum_g1 += len(
                        cluster_data['hits'][cluster_data['hits'] > 1])
                    hit_sum += np.sum(cluster_data['hits'])
                    hit_sum_b += hit_data_tmp.shape[0]

            # print out final information on clustering
            print("# cluster in total: " + str(cluster_sum))
            print("# cluster with more than one hit: " + str(cluster_sum_g1))
            print("# hits in total: " + str(hit_sum))
            print("# hits in total alternative calc: " + str(hit_sum))
Code Example #11
File: PixelDAC_opt.py  Project: SiLab-Bonn/tpx3-daq
    def analyze_iteration(self, iteration=0, progress=None, status=None):
        '''
            Analyze the data of the iteration and calculate the new Ibias_PixelDAC value.
            In the last iteration the data is also used to calculate an equalisation matrix.
            If progress is None a tqdm progress bar is used, otherwise progress should be a multiprocessing Queue which stores the progress as a fraction of 1.
            If a status queue is given, information about the status of the scan is put into it.
        '''

        h5_filename = self.output_filename + '.h5'

        self.logger.info('Starting data analysis...')
        if status is not None:
            status.put("Performing data analysis")

        # Open the HDF5 which contains all data of the optimization iteration
        with tb.open_file(h5_filename, 'r+') as h5_file:
            # Read raw data, meta data and configuration parameters for the current iteration
            meta_data_call = ('h5_file.root.' + 'meta_data_' + str(iteration) +
                              '[:]')
            meta_data = eval(meta_data_call)
            run_config_call = ('h5_file.root.' + 'configuration.run_config_' +
                               str(iteration) + '[:]')
            run_config = eval(run_config_call)
            general_config_call = ('h5_file.root.' +
                                   'configuration.generalConfig_' +
                                   str(iteration) + '[:]')
            general_config = eval(general_config_call)
            op_mode = [
                row[1] for row in general_config if row[0] == b'Op_mode'
            ][0]
            vco = [
                row[1] for row in general_config if row[0] == b'Fast_Io_en'
            ][0]

            self.logger.info('Interpret raw data...')

            # THR = 0
            param_range, index = np.unique(meta_data['scan_param_id'],
                                           return_index=True)
            meta_data_th0 = meta_data[
                meta_data['scan_param_id'] < len(param_range) // 2]
            param_range_th0 = np.unique(meta_data_th0['scan_param_id'])

            # THR = 15
            meta_data_th15 = meta_data[
                meta_data['scan_param_id'] >= len(param_range) // 2]
            param_range_th15 = np.unique(meta_data_th15['scan_param_id'])

            # shift indices so that they start with zero
            start = meta_data_th15['index_start'][0]
            meta_data_th15[
                'index_start'] = meta_data_th15['index_start'] - start
            meta_data_th15['index_stop'] = meta_data_th15['index_stop'] - start

            self.logger.info('THR = 0')
            #THR = 0
            raw_data_call = ('h5_file.root.' + 'raw_data_' + str(iteration) +
                             '[:' + str(meta_data_th0['index_stop'][-1]) + ']')
            raw_data_thr0 = eval(raw_data_call)
            hit_data_thr0 = analysis.interpret_raw_data(raw_data_thr0,
                                                        op_mode,
                                                        vco,
                                                        meta_data_th0,
                                                        progress=progress)
            raw_data_thr0 = None

            self.logger.info('THR = 15')
            #THR = 15
            raw_data_call = ('h5_file.root.' + 'raw_data_' + str(iteration) +
                             '[' + str(meta_data_th0['index_stop'][-1]) + ':]')
            raw_data_thr15 = eval(raw_data_call)
            hit_data_thr15 = analysis.interpret_raw_data(raw_data_thr15,
                                                         op_mode,
                                                         vco,
                                                         meta_data_th15,
                                                         progress=progress)
            raw_data_thr15 = None

        # Read needed configuration parameters
        Vthreshold_start = [
            int(item[1]) for item in run_config
            if item[0] == b'Vthreshold_start'
        ][0]
        Vthreshold_stop = [
            int(item[1]) for item in run_config
            if item[0] == b'Vthreshold_stop'
        ][0]
        n_injections = [
            int(item[1]) for item in run_config if item[0] == b'n_injections'
        ][0]
        pixeldac = [
            int(item[1]) for item in run_config if item[0] == b'pixeldac'
        ][0]
        last_pixeldac = [
            int(item[1]) for item in run_config if item[0] == b'last_pixeldac'
        ][0]
        last_delta = [
            float(item[1]) for item in run_config if item[0] == b'last_delta'
        ][0]
        chip_wafer = [
            int(item[1]) for item in run_config if item[0] == b'chip_wafer'
        ][0]
        chip_x = [
            item[1].decode() for item in run_config if item[0] == b'chip_x'
        ][0]
        chip_y = [int(item[1]) for item in run_config
                  if item[0] == b'chip_y'][0]

        # Select only data which is hit data
        hit_data_thr0 = hit_data_thr0[hit_data_thr0['data_header'] == 1]
        hit_data_thr15 = hit_data_thr15[hit_data_thr15['data_header'] == 1]

        # Divide the data into two parts - data for pixel threshold 0 and 15
        param_range = np.unique(meta_data['scan_param_id'])
        meta_data = None
        param_range_th0 = np.unique(hit_data_thr0['scan_param_id'])
        param_range_th15 = np.unique(hit_data_thr15['scan_param_id'])

        # Create histograms for number of detected hits for individual thresholds
        self.logger.info(
            'Get the global threshold distributions for all pixels...')
        scurve_th0 = analysis.scurve_hist(hit_data_thr0,
                                          np.arange(len(param_range) // 2))
        hit_data_thr0 = None
        scurve_th15 = analysis.scurve_hist(
            hit_data_thr15, np.arange(len(param_range) // 2, len(param_range)))
        hit_data_thr15 = None

        # Fit S-Curves to the histograms for all pixels
        self.logger.info('Fit the scurves for all pixels...')
        thr2D_th0, sig2D_th0, chi2ndf2D_th0 = analysis.fit_scurves_multithread(
            scurve_th0,
            scan_param_range=list(range(Vthreshold_start, Vthreshold_stop)),
            n_injections=n_injections,
            invert_x=True,
            progress=progress)
        scurve_th0 = None
        thr2D_th15, sig2D_th15, chi2ndf2D_th15 = analysis.fit_scurves_multithread(
            scurve_th15,
            scan_param_range=list(range(Vthreshold_start, Vthreshold_stop)),
            n_injections=n_injections,
            invert_x=True,
            progress=progress)
        scurve_th15 = None

        # Put the threshold distribution based on the fit results in two histograms
        self.logger.info('Get the cumulated global threshold distributions...')
        hist_th0 = analysis.vth_hist(thr2D_th0, Vthreshold_stop)
        hist_th15 = analysis.vth_hist(thr2D_th15, Vthreshold_stop)

        # Use the threshold histograms to calculate the new Ibias_PixelDAC setting
        self.logger.info('Calculate new pixelDAC value...')
        pixeldac_result = analysis.pixeldac_opt(hist_th0, hist_th15, pixeldac,
                                                last_pixeldac, last_delta,
                                                Vthreshold_start,
                                                Vthreshold_stop)
        delta = pixeldac_result[1]
        rms_delta = pixeldac_result[2]

        # In the last iteration calculate also the equalisation matrix
        if delta > rms_delta - 2 and delta < rms_delta + 2:
            # Use the threshold histograms and one threshold distribution to calculate the equalisation
            self.logger.info('Calculate the equalisation matrix...')
            eq_matrix = analysis.eq_matrix(hist_th0, hist_th15, thr2D_th0,
                                           Vthreshold_start, Vthreshold_stop)

            # Don't mask any pixels in the mask file
            mask_matrix = np.zeros((256, 256), dtype=bool)
            mask_matrix[:, :] = 0

            # Write the equalisation matrix to a new HDF5 file
            self.save_thr_mask(eq_matrix, chip_wafer, chip_x, chip_y)

        self.logger.info(
            'Result of iteration: Scan with pixeldac %i - New pixeldac %i. Delta was %f with optimal delta %f'
            % (int(pixeldac), int(
                pixeldac_result[0]), pixeldac_result[1], pixeldac_result[2]))
        return pixeldac_result
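
The eval()-based access to the per-iteration nodes in the example above can also be written with PyTables' File.get_node, which avoids assembling code strings (and the string/integer concatenation they require). A minimal sketch, assuming the same per-iteration node names inside the opened h5_file:

# Read the per-iteration nodes without eval()
meta_data = h5_file.get_node('/meta_data_%d' % iteration)[:]
run_config = h5_file.get_node('/configuration/run_config_%d' % iteration)[:]
general_config = h5_file.get_node('/configuration/generalConfig_%d' % iteration)[:]

raw_data_node = h5_file.get_node('/raw_data_%d' % iteration)
raw_data_thr0 = raw_data_node[:meta_data_th0['index_stop'][-1]]
raw_data_thr15 = raw_data_node[meta_data_th0['index_stop'][-1]:]
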