Example 1
    def event_proc(self, evnum, nhits, pktsec) :
        """
           TODO by end user:
           Here you must read in a data block from your data file
           and fill the array tdc_ns[][] and number_of_hits[]
        """
        if evnum == self.evnum_old : return
        self.evnum_old = evnum

        if not self.set_data_arrays(nhits, pktsec) : return

        sorter, number_of_hits, tdc_ns  = self.sorter, self.number_of_hits, self.tdc_ns

        Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp = sorter.channel_indexes

        if self.VERBOSE : print_tdc_ns(tdc_ns, cmt='  TDC raw data -> ns ')

        if sorter.use_hex :
            # shift the time sums to zero:
            sorter.shift_sums(+1, self.offset_sum_u, self.offset_sum_v, self.offset_sum_w)
            # shift layer w so that the middle lines of all layers intersect in one point:
            sorter.shift_layer_w(+1, self.w_offset)
        else :
            # shift the time sums to zero:
            sorter.shift_sums(+1, self.offset_sum_u, self.offset_sum_v)

        if self.VERBOSE : print_tdc_ns(tdc_ns, cmt='  TDC after shift_sums ')

        # shift all signals from the anode so that the center of the detector is at x=y=0:
        sorter.shift_position_origin(+1, self.pos_offset_x, self.pos_offset_y)
        sorter.feed_calibration_data(True, self.w_offset) # for calibration of fv, fw, w_offset and correction tables

        if self.VERBOSE : print_tdc_ns(tdc_ns, cmt='  TDC after feed_calibration_data ')

        #logger.info('map_is_full_enough', hexanode.py_sorter_scalefactors_calibration_map_is_full_enough(sorter))

        # NOT VALID FOR QUAD
        #sfco = hexanode.py_scalefactors_calibration_class(sorter) # NOT FOR QUAD
        # break loop if statistics is enough
        #if sfco :
        #    if sfco.map_is_full_enough() : 
        #         logger.info('sfo.map_is_full_enough(): %s  event number: %06d' % (sfco.map_is_full_enough(), evnum))
        #         break

        # Sort the TDC-Data and reconstruct missing signals and apply the time-sum- and NL-correction.
        # number_of_particles is the number of reconstructed particles

        number_of_particles = sorter.sort() if self.command == 1 else\
                              sorter.run_without_sorting()

        #file.get_tdc_data_array(tdc_ns, NUM_HITS)
        if self.VERBOSE :
            logger.info('  (un/)sorted number_of_hits_array %s' % str(number_of_hits[:8]))
            print_tdc_ns(tdc_ns, cmt='  TDC sorted data ')
            logger.info("  Event %5i  number_of_particles: %i" % (evnum, number_of_particles))
            for i in range(number_of_particles) :
                #### IT DID NOT WORK ON LCLS2 because pointer was deleted in py_hit_class.__dealloc__
                hco = hexanode.py_hit_class(sorter, i) 
                logger.info("    p:%2i x:%7.3f y:%7.3f t:%7.3f met:%d" % (i, hco.x, hco.y, hco.time, hco.method))
Example 2
    def fill_corrected_data(self):

        sorter = self.proc.sorter
        Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp = sorter.channel_indexes
        number_of_particles = sorter.output_number_of_hits

        if self.STAT_NHITS:
            self.lst_nparts.append(number_of_particles)

        # Discards most events when command > 1
        #=====================
        if number_of_particles < 1:
            logger.debug('no hits found in event ')
            return False
        #=====================

        tdc_ns = self.proc.tdc_ns

        u_ns = tdc_ns[Cu1, 0] - tdc_ns[Cu2, 0]
        v_ns = tdc_ns[Cv1, 0] - tdc_ns[Cv2, 0]
        w_ns = 0  #tdc_ns[Cw1,0] - tdc_ns[Cw2,0]

        u = u_ns * sorter.fu
        v = v_ns * sorter.fv
        w = 0  #(w_ns + self.w_offset) * sorter.fw

        Xuv = u
        Xuw = 0  #u
        Xvw = 0  #v + w
        Yuv = v  #(u - 2*v)*OSQRT3
        Yuw = 0  #(2*w - u)*OSQRT3
        Yvw = 0  # (w - v)*OSQRT3

        dX = 0  # Xuv - Xvw
        dY = 0  # Yuv - Yvw

        time_sum_u_corr = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v_corr = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w_corr = 0  # tdc_ns[Cw1,0] + tdc_ns[Cw2,0] - 2*tdc_ns[Cmcp,0]

        if sorter.use_hex:
            w_ns = tdc_ns[Cw1, 0] - tdc_ns[Cw2, 0]
            w = (w_ns + self.w_offset) * sorter.fw

            Xuw = u
            Xvw = v + w
            Yuv = (u - 2 * v) * OSQRT3
            Yuw = (2 * w - u) * OSQRT3
            Yvw = (w - v) * OSQRT3

            dX = Xuv - Xvw
            dY = Yuv - Yvw

            time_sum_w_corr = tdc_ns[Cw1, 0] + tdc_ns[Cw2, 0] - 2 * tdc_ns[Cmcp, 0]

        #---------

        if self.STAT_UVW or self.STAT_CORRELATIONS:
            self.lst_u_ns.append(u_ns)
            self.lst_v_ns.append(v_ns)
            self.lst_w_ns.append(w_ns)
            self.lst_u.append(u)
            self.lst_v.append(v)
            self.lst_w.append(w)

        if self.STAT_TIME_SUMS or self.STAT_CORRELATIONS:
            self.lst_time_sum_u_corr.append(time_sum_u_corr)
            self.lst_time_sum_v_corr.append(time_sum_v_corr)
            self.lst_time_sum_w_corr.append(time_sum_w_corr)

        if self.STAT_XY_COMPONENTS:
            self.lst_Xuv.append(Xuv)
            self.lst_Xuw.append(Xuw)
            self.lst_Xvw.append(Xvw)

            self.lst_Yuv.append(Yuv)
            self.lst_Yuw.append(Yuw)
            self.lst_Yvw.append(Yvw)

        hco = py_hit_class(sorter, 0)

        if self.STAT_MISC:
            inds_incr = ((Cu1,1), (Cu2,2), (Cv1,4), (Cv2,8), (Cw1,16), (Cw2,32), (Cmcp,64)) if sorter.use_hex else\
                        ((Cu1,1), (Cu2,2), (Cv1,4), (Cv2,8), (Cmcp,16))

            dR = sqrt(dX * dX + dY * dY)
            self.list_dr.append(dR)

            # fill Consistence Indicator
            consistenceIndicator = 0
            for (ind, incr) in inds_incr:
                if self.proc.number_of_hits[ind] > 0:
                    consistenceIndicator += incr
            self.lst_consist_indicator.append(consistenceIndicator)

            self.lst_rec_method.append(hco.method)
            #logger.info('reconstruction method %d' % hco.method)

        if self.STAT_XY_2D:

            # fill 2-d images
            x1, y1 = hco.x, hco.y

            x2, y2 = (-10, -10)
            if number_of_particles > 1:
                hco2 = py_hit_class(sorter, 1)
                x2, y2 = hco2.x, hco2.y

            ix1, ix2, ixuv, ixuw, ixvw = self.img_x_bins.bin_indexes(
                (x1, x2, Xuv, Xuw, Xvw))
            iy1, iy2, iyuv, iyuw, iyvw = self.img_y_bins.bin_indexes(
                (y1, y2, Yuv, Yuw, Yvw))

            self.img_xy_1[iy1, ix1] += 1
            self.img_xy_2[iy2, ix2] += 1
            self.img_xy_uv[iyuv, ixuv] += 1
            self.img_xy_uw[iyuw, ixuw] += 1
            self.img_xy_vw[iyvw, ixvw] += 1

        if self.STAT_PHYSICS:
            if self.proc.number_of_hits[Cmcp] > 1:
                #t0, t1 = tdc_ns[Cmcp,:2]
                #it0, it1 = self.t_ns_bins.bin_indexes((t0, t1))
                #self.t1_vs_t0[it1, it0] += 1

                #ix, iy = self.x_mm_bins.bin_indexes((Xuv,Yuv))
                #self.x_vs_t0[ix, it0] += 1
                #self.y_vs_t0[iy, it0] += 1

                #logger.info("  Event %5i  number_of_particles: %i" % (evnum, number_of_particles))
                #for i in range(number_of_particles) :
                #    hco = py_hit_class(sorter, i)
                #    #logger.info("    p:%2i x:%7.3f y:%7.3f t:%7.3f met:%d" % (i, hco.x, hco.y, hco.time, hco.method))
                #    x,y,t = hco.x, hco.y, hco.time
                #    r = sqrt(x*x+y*y)
                #    if x<0 : r=-r
                #    ir = self.r_mm_bins.bin_indexes((r,))
                #    it = self.t_ns_bins.bin_indexes((t,))
                #    self.r_vs_t[ir, it] += 1

                for x, y, r, t in sorter.xyrt_list():
                    #irx, iry = self.r_mm_bins.bin_indexes((r if x>0 else -r, r if y>0 else -r))
                    iry = self.r_mm_bins.bin_indexes((r if y > 0 else -r, ))
                    it = self.t_ns_bins.bin_indexes((t * SEC_TO_NS, ))
                    #self.rsx_vs_t[irx, it] += 1
                    self.rsy_vs_t[iry, it] += 1

                times = np.array(sorter.t_list()) * SEC_TO_NS
                tinds = self.t_ns_bins.bin_indexes(times)  # INDEXES SHOULD BE np.array
                #print_ndarr(times, '\n    XXX times')
                #print_ndarr(tinds, '\n    XXX tinds')

                # accumulate times in the list
                for t in times:
                    self.lst_t_all.append(t)

                # accumulate times directly in histogram to evaluate average
                self.t_all[tinds] += 1

                # accumulate times in correlation matrix
                for i in tinds:
                    self.ti_vs_tj[i, tinds] += 1

        return True
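
fill_corrected_data above keeps only the u/v (quad) projection unless sorter.use_hex is set; the hex branch combines all three layers into redundant (X, Y) estimates and uses their spread as a consistency measure. A small standalone sketch of that combination, using the same formulas as the use_hex branch (hex_xy is a hypothetical helper name):

from math import sqrt

OSQRT3 = 1. / sqrt(3.)

def hex_xy(u, v, w):
    """Three redundant (X, Y) estimates from the layer coordinates u, v, w,
    and the mutual deviation dR used as a consistency measure."""
    Xuv, Xuw, Xvw = u, u, v + w
    Yuv = (u - 2 * v) * OSQRT3
    Yuw = (2 * w - u) * OSQRT3
    Yvw = (w - v) * OSQRT3
    dX, dY = Xuv - Xvw, Yuv - Yvw
    dR = sqrt(dX * dX + dY * dY)  # small dR means the three layers agree
    return (Xuv, Yuv), (Xuw, Yuw), (Xvw, Yvw), dR
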
Example 3
def py_sort():
    print "syntax: sort_LMF filename\n"\
          "        This file will be sorted and\n"\
          "        a new file will be written.\n\n"

    if len(sys.argv) < 2:
        print "Please provide a filename.\n"
        sys.exit(0)

    if len(sys.argv) > 2:
        print "too many arguments\n"
        sys.exit(0)

    tdc_ns = np.zeros((NUM_CHANNELS, NUM_IONS), dtype=np.float64)
    number_of_hits = np.zeros((NUM_CHANNELS, ), dtype=np.int32)

    command = -1

    #   // The "command"-value is set in the first line of "sorter.txt"
    #   // 0 = only convert to new file format
    #   // 1 = sort and write new file
    #   // 2 = calibrate fv, fw, w_offset
    #   // 3 = create calibration table files

    #   // create the sorter:
    sorter = None
    sorter = hexanode.py_sort_class()
    fname_cfg = "sorter.txt"
    status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=\
        hexanode.py_read_config_file(fname_cfg, sorter)
    print 'read_config_file status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=',\
                            status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y

    if not status:
        print "WARNING: can't read config file %s" % fname_cfg
        del sorter
        sys.exit(0)

    print 'use_sum_correction', sorter.use_sum_correction
    print 'use_pos_correction', sorter.use_pos_correction
    if sorter is not None:
        if sorter.use_sum_correction or sorter.use_pos_correction:
            status = hexanode.py_read_calibration_tables(
                FNAME_CALIBRATION_TABLE, sorter)

    if command == -1:
        print "no config file was read. Nothing to do."
        if sorter is not None: del sorter
        sys.exit(0)

    Cu1 = sorter.cu1
    Cu2 = sorter.cu2
    Cv1 = sorter.cv1
    Cv2 = sorter.cv2
    Cw1 = sorter.cw1
    Cw2 = sorter.cw2
    Cmcp = sorter.cmcp
    print "Numeration of channels - u1:%i  u2:%i  v1:%i  v2:%i  w1:%i  w2:%i  mcp:%i"%\
          (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp)

    inds_of_channels = (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2)
    incr_of_consistence = (1, 2, 4, 8, 16, 32)
    inds_incr = zip(inds_of_channels, incr_of_consistence)

    LMF_Filename = sys.argv[1]
    LMF = hexanode.lmf_io(NUM_CHANNELS, NUM_IONS)
    if not LMF.open_input_lmf(LMF_Filename):
        print "Can't open file: %s" % LMF_Filename
        sys.exit(0)

    print 'LMF start time : %s' % LMF.start_time()
    print 'LMF stop time  : %s' % LMF.stop_time()

    #   // initialization of the sorter:
    print "init sorter... "

    sorter.set_tdc_resolution_ns(0.025)
    sorter.set_tdc_array_row_length(NUM_IONS)
    sorter.set_count(number_of_hits)
    sorter.set_tdc_pointer(tdc_ns)

    #sorter.set_use_reflection_filter_on_u1(False) # Achim recommended False
    #sorter.set_use_reflection_filter_on_u2(False)

    if command >= 2:
        sorter.create_scalefactors_calibrator(True,\
                                              sorter.runtime_u,\
                                              sorter.runtime_v,\
                                              sorter.runtime_w, 0.78,\
                                              sorter.fu, sorter.fv, sorter.fw)

    error_code = sorter.init_after_setting_parameters()
    if error_code:
        print "sorter could not be initialized\n"
        error_text = sorter.get_error_text(error_code, 512)
        print 'Error %d: %s' % (error_code, error_text)
        sys.exit(0)

    print "Calibration factors:\n  f_U (mm/ns) =%f\n  f_V (mm/ns) =%f\n  f_W (mm/ns) =%f\n  Offset on layer W (ns) =%f\n"%\
          (2*sorter.fu, 2*sorter.fv, 2*sorter.fw, w_offset)

    print "ok for sorter initialization\n"

    print "LMF.tdcresolution %f\n" % LMF.tdc_resolution

    #   while (my_kbhit()); // empty keyboard buffer

    event_counter = 0
    osqrt3 = 1. / sqrt(3.)

    create_output_directory()

    #####################
    #sys.exit('TEST EXIT')
    #####################

    print "reading event data... \n"

    t_sec = time()
    while LMF.read_next_event():

        #number_of_channels = LMF.get_number_of_channels()
        event_number = LMF.get_event_number()
        event_counter += 1

        if not event_number % 10000:
            print 'Event number: %06d' % event_number

        # //if (event_counter%10000 == 0) {if (my_kbhit()) break;}

        # //==================================
        # // TODO by end user:
        # // Here you must read in a data block from your data file
        # // and fill the array tdc_ns[][] and number_of_hits[]

        #nhits = np.zeros((NUMBER_OF_CHANNELS,), dtype=np.int32)
        LMF.get_number_of_hits_array(number_of_hits)
        if LMF.error_flag:
            error_text = LMF.get_error_text(LMF.error_flag)
            print "LMF Error %d: %s" % (LMF.error_flag, error_text)
            sys.exit(0)
        if DO_PRINT: print '   number_of_hits_array', number_of_hits[:8]

        LMF.get_tdc_data_array(tdc_ns)

        if LMF.error_flag:
            error_text = LMF.get_error_text(LMF.error_flag)
            print "LMF Error %d: %s" % (LMF.error_flag, error_text)
            sys.exit(0)
        if DO_PRINT: print '   TDC data:\n', tdc_ns[0:8, 0:5]

        # // apply conversion to ns
        if True:
            tdc_ns *= LMF.tdc_resolution

        # //==================================
        # // TODO by end user...

        if PLOT_NHITS:
            sp.lst_nhits_u1.append(number_of_hits[Cu1])
            sp.lst_nhits_u2.append(number_of_hits[Cu2])
            sp.lst_nhits_v1.append(number_of_hits[Cv1])
            sp.lst_nhits_v2.append(number_of_hits[Cv2])
            sp.lst_nhits_w1.append(number_of_hits[Cw1])
            sp.lst_nhits_w2.append(number_of_hits[Cw2])
            sp.lst_nhits_mcp.append(number_of_hits[Cmcp])

        if PLOT_TIME_CH:
            sp.lst_u1.append(tdc_ns[Cu1, 0])
            sp.lst_u2.append(tdc_ns[Cu2, 0])
            sp.lst_v1.append(tdc_ns[Cv1, 0])
            sp.lst_v2.append(tdc_ns[Cv2, 0])
            sp.lst_w1.append(tdc_ns[Cw1, 0])
            sp.lst_w2.append(tdc_ns[Cw2, 0])
            sp.lst_mcp.append(tdc_ns[Cmcp, 0])

        if PLOT_REFLECTIONS:
            if number_of_hits[Cu2] > 1:
                sp.lst_refl_u1.append(tdc_ns[Cu2, 1] - tdc_ns[Cu1, 0])
            if number_of_hits[Cu1] > 1:
                sp.lst_refl_u2.append(tdc_ns[Cu1, 1] - tdc_ns[Cu2, 0])
            if number_of_hits[Cv2] > 1:
                sp.lst_refl_v1.append(tdc_ns[Cv2, 1] - tdc_ns[Cv1, 0])
            if number_of_hits[Cv1] > 1:
                sp.lst_refl_v2.append(tdc_ns[Cv1, 1] - tdc_ns[Cv2, 0])
            if number_of_hits[Cw2] > 1:
                sp.lst_refl_w1.append(tdc_ns[Cw2, 1] - tdc_ns[Cw1, 0])
            if number_of_hits[Cw1] > 1:
                sp.lst_refl_w2.append(tdc_ns[Cw1, 1] - tdc_ns[Cw2, 0])

        time_sum_u = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w = tdc_ns[Cw1, 0] + tdc_ns[Cw2, 0] - 2 * tdc_ns[Cmcp, 0]

        u_ns = tdc_ns[Cu1, 0] - tdc_ns[Cu2, 0]
        v_ns = tdc_ns[Cv1, 0] - tdc_ns[Cv2, 0]
        w_ns = tdc_ns[Cw1, 0] - tdc_ns[Cw2, 0]

        u = u_ns * sorter.fu
        v = v_ns * sorter.fv
        w = (w_ns + w_offset) * sorter.fw

        Xuv = u
        Xuw = u
        Xvw = v + w
        Yuv = (u - 2 * v) * osqrt3
        Yuw = (2 * w - u) * osqrt3
        Yvw = (w - v) * osqrt3

        dX = Xuv - Xvw
        dY = Yuv - Yvw
        Deviation = sqrt(dX * dX + dY * dY)

        if sorter.use_hex:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v, offset_sum_w)
            #shift layer w so that the middle lines of all layers intersect in one point:
            sorter.shift_layer_w(+1, w_offset)
        else:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v)

        # shift all signals from the anode so that the center of the detector is at x=y=0:
        sorter.shift_position_origin(+1, pos_offset_x, pos_offset_y)

        sorter.feed_calibration_data(
            True, w_offset
        )  # for calibration of fv, fw, w_offset and correction tables

        #LMF.get_tdc_data_array(tdc_ns)

        time_sum_u_corr = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v_corr = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w_corr = tdc_ns[Cw1, 0] + tdc_ns[Cw2, 0] - 2 * tdc_ns[Cmcp, 0]

        #print 'map_is_full_enough', hexanode.py_sorter_scalefactors_calibration_map_is_full_enough(sorter)
        sfco = hexanode.py_scalefactors_calibration_class(sorter)

        # break loop if statistics is enough
        if sfco:
            if sfco.map_is_full_enough():
                print 'sfco.map_is_full_enough(): %s  event number: %06d' % (
                    sfco.map_is_full_enough(), event_number)
                break

        if PLOT_XY_RESOLUTION:
            #print "    binx: %d  biny: %d  resolution(FWHM): %.6f" % (sfco.binx, sfco.biny, sfco.detector_map_resol_FWHM_fill)
            if sfco.binx >= 0 and sfco.biny >= 0:
                sp.lst_binx.append(sfco.binx)
                sp.lst_biny.append(sfco.biny)
                sp.lst_resol_fwhm.append(sfco.detector_map_resol_FWHM_fill)

        # Sort the TDC-Data and reconstruct missing signals and apply the sum- and NL-correction.
        # number_of_particles is the number of reconstructed particles
        number_of_particles = sorter.sort() if command == 1 else\
                                  sorter.run_without_sorting()

        if False:
            print "  Event %5i  number_of_particles: %i" % (
                event_number, number_of_particles)
            for i in range(number_of_particles):
                hco = hexanode.py_hit_class(sorter, i)
                print "    p:%1i x:%.3f y:%.3f t:%.3f met:%d" % (
                    i, hco.x, hco.y, hco.time, hco.method)

            print "    part1 u:%.3f v:%.3f w:%.3f" % (u, v, w)

        # // TODO by end user...

        if number_of_particles < 1: continue

        hco = hexanode.py_hit_class(sorter, 0)

        if PLOT_UVW or PLOT_CORRELATIONS:
            sp.lst_u_ns.append(u_ns)
            sp.lst_v_ns.append(v_ns)
            sp.lst_w_ns.append(w_ns)
            sp.lst_u.append(u)
            sp.lst_v.append(v)
            sp.lst_w.append(w)

        if PLOT_TIME_SUMS or PLOT_CORRELATIONS:
            sp.lst_time_sum_u.append(time_sum_u)
            sp.lst_time_sum_v.append(time_sum_v)
            sp.lst_time_sum_w.append(time_sum_w)

            sp.lst_time_sum_u_corr.append(time_sum_u_corr)
            sp.lst_time_sum_v_corr.append(time_sum_v_corr)
            sp.lst_time_sum_w_corr.append(time_sum_w_corr)

        if PLOT_XY_COMPONENTS:
            sp.lst_Xuv.append(Xuv)
            sp.lst_Xuw.append(Xuw)
            sp.lst_Xvw.append(Xvw)

            sp.lst_Yuv.append(Yuv)
            sp.lst_Yuw.append(Yuw)
            sp.lst_Yvw.append(Yvw)

        if PLOT_MISC:
            sp.lst_Deviation.append(Deviation)

            # fill Consistence Indicator
            consistenceIndicator = 0
            for (ind, incr) in inds_incr:
                if number_of_hits[ind] > 0: consistenceIndicator += incr
            sp.lst_consist_indicator.append(consistenceIndicator)

            sp.lst_rec_method.append(hco.method)
            #print 'reconstruction method %d' % hco.method

        if PLOT_XY_2D:
            # fill 2-d images
            x1, y1 = hco.x, hco.y

            x2, y2 = (-10, -10)
            if number_of_particles > 1:
                hco2 = hexanode.py_hit_class(sorter, 1)
                x2, y2 = hco2.x, hco2.y

            ix1, ix2, ixuv, ixuw, ixvw = sp.img_x_bins.bin_indexes(
                (x1, x2, Xuv, Xuw, Xvw))
            iy1, iy2, iyuv, iyuw, iyvw = sp.img_y_bins.bin_indexes(
                (y1, y2, Yuv, Yuw, Yvw))

            sp.img_xy_1[iy1, ix1] += 1
            sp.img_xy_2[iy2, ix2] += 1
            sp.img_xy_uv[iyuv, ixuv] += 1
            sp.img_xy_uw[iyuw, ixuw] += 1
            sp.img_xy_vw[iyvw, ixvw] += 1

        if PLOT_PHYSICS:
            if number_of_hits[Cmcp] > 1:
                t0, t1 = tdc_ns[Cmcp, :2]
                it0, it1 = sp.t_ns_bins.bin_indexes((t0, t1))
                sp.t1_vs_t0[it1, it0] += 1

                ix, iy = sp.x_mm_bins.bin_indexes((Xuv, Yuv))
                #iy = sp.y_mm_bins.bin_indexes((Yuv,))
                sp.x_vs_t0[ix, it0] += 1
                sp.y_vs_t0[iy, it0] += 1

        # // write the results into a new data file.
        # // the variable "number_of_particles" contains the number of reconstructed particles.
        # // the x and y (in mm) and the TOF (in ns) are stored in the array sorter->output_hit_array:

        # // for the i-th particle (i starts from 0):
        # // hco = hexanode.py_hit_class(sorter, i)
        # // hco.x, hco.y, hco.time

        # // for each particle you can also retrieve the information about how the particle
        # // was reconstructed (to get some measure of the confidence):
        # // hco.method

    # end of the while loop

    if command == 2:
        print "calibrating detector... "
        sorter.do_calibration()
        print "ok - after do_calibration"
        sfco = hexanode.py_scalefactors_calibration_class(sorter)
        if sfco:
            print "Good calibration factors are:\n  f_U =%f\n  f_V =%f\n  f_W =%f\n  Offset on layer W=%f\n"%\
                  (2*sorter.fu, 2*sfco.best_fv, 2*sfco.best_fw, sfco.best_w_offset)

    if command == 3:  # generate and print correction tables for sum- and position-correction
        print "creating calibration tables..."
        status = hexanode.py_create_calibration_tables(FNAME_CALIBRATION_TABLE,
                                                       sorter)
        print "finished creating calibration tables: %s status %s" % (
            FNAME_CALIBRATION_TABLE, status)

    print "consumed time (sec) = %.6f\n" % (time() - t_sec)

    if sorter is not None: del sorter
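
The consistence indicator filled under PLOT_MISC is a plain bit mask: every channel that registered at least one hit contributes its power-of-two increment from inds_incr, so the value can later be decoded back into the set of channels that fired. A short sketch of the same logic as a standalone helper (function names here are illustrative):

def consistence_indicator(number_of_hits, inds_incr):
    """Bit mask of channels with at least one hit, e.g. 63 = 0b111111
    means u1, u2, v1, v2, w1 and w2 all fired (hex channel ordering)."""
    ci = 0
    for ind, incr in inds_incr:
        if number_of_hits[ind] > 0:
            ci += incr
    return ci

def channels_from_indicator(ci, inds_incr):
    """Decode the bit mask back into the list of channel indexes that fired."""
    return [ind for ind, incr in inds_incr if ci & incr]
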
Example 4
def calib_on_data(**kwargs):

    OSQRT3 = 1. / sqrt(3.)
    CTYPE_HEX_CONFIG = 'hex_config'
    CTYPE_HEX_TABLE = 'hex_table'

    print(usage())
    #SRCCHS
    #DSNAME       = kwargs.get('dsname', '/reg/g/psdm/detector/data2_test/xtc/data-amox27716-r0100-acqiris-e000100.xtc2')
    COMMAND = kwargs.get('command', 0)
    IFNAME = kwargs.get(
        'ifname',
        '/reg/g/psdm/detector/data_test/hdf5/amox27716-r0100-e060000-single-node.h5'
    )
    DETNAME = kwargs.get('detname', 'tmo_hexanode')
    EVSKIP = kwargs.get('evskip', 0)
    EVENTS = kwargs.get('events', 1000000) + EVSKIP
    OFPREFIX = kwargs.get('ofprefix', './figs-hexanode/plot')
    NUM_CHANNELS = kwargs.get('numchs', 5)
    NUM_HITS = kwargs.get('numhits', 16)
    calibtab = kwargs.get('calibtab', None)
    calibcfg = kwargs.get('calibcfg', None)
    PLOT_HIS = kwargs.get('plot_his', True)
    SAVE_HIS = kwargs.get('save_his', False)
    VERBOSE = kwargs.get('verbose', False)

    print(gu.str_kwargs(kwargs, title='input parameters:'))

    sp.set_parameters(**kwargs)  # save parameters in store for graphics

    #=====================

    file = open_input_h5file(IFNAME)

    #=====================

    CALIBTAB = calibtab  #if calibtab is not None else file.find_calib_file(type=CTYPE_HEX_TABLE)
    CALIBCFG = calibcfg  #if calibcfg is not None else file.find_calib_file(type=CTYPE_HEX_CONFIG)

    #=====================

    print('events in file : %s' % file.h5ds_nevents)
    print('start time     : %s' % file.start_time())
    print('stop time      : %s' % file.stop_time())
    print('tdc_resolution : %s' % file.tdc_resolution())
    print('CALIBTAB       : %s' % CALIBTAB)
    print('CALIBCFG       : %s' % CALIBCFG)

    #print('file calib_dir   : %s' % file.calib_dir())
    #print('file calib_src   : %s' % file.calib_src())
    #print('file calib_group : %s' % file.calib_group())
    #print('file ctype_dir   : %s' % file.calibtype_dir())

    #   // The "command"-value is set in the first line of "sorter.txt"
    #   // 0 = only convert to new file format
    #   // 1 = sort and write new file
    #   // 2 = calibrate fv, fw, w_offset
    #   // 3 = create calibration table files

    #   // create the sorter:
    sorter = hexanode.py_sort_class()
    status, command_cfg, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=\
        hexanode.py_read_config_file(CALIBCFG.encode(), sorter)
    #command = COMMAND # command_cfg
    command = command_cfg

    print('read_config_file status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=',\
                            status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y)

    if not status:
        print("WARNING: can't read config file %s" % fname_cfg)
        del sorter
        sys.exit(0)

    print('use_sum_correction', sorter.use_sum_correction)
    print('use_pos_correction HEX ONLY', sorter.use_pos_correction)
    if sorter is not None:
        if sorter.use_sum_correction or sorter.use_pos_correction:
            status = hexanode.py_read_calibration_tables(
                CALIBTAB.encode(), sorter)

    if command == -1:
        print("no config file was read. Nothing to do.")
        if sorter is not None: del sorter
        sys.exit(0)

    Cu1 = sorter.cu1
    Cu2 = sorter.cu2
    Cv1 = sorter.cv1
    Cv2 = sorter.cv2
    Cw1 = sorter.cw1
    Cw2 = sorter.cw2
    Cmcp = sorter.cmcp
    print("Numeration of channels - u1:%i  u2:%i  v1:%i  v2:%i  w1:%i  w2:%i  mcp:%i"%\
          (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp))

    #inds_of_channels    = (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2)
    #incr_of_consistence = (  1,   2,   4,   8,  16,  32)
    inds_of_channels = (Cu1, Cu2, Cv1, Cv2, Cmcp)
    incr_of_consistence = (1, 2, 4, 8, 16)
    inds_incr = list(zip(inds_of_channels, incr_of_consistence))

    #print("chanel increments:", inds_incr)

    #=====================
    #=====================
    #=====================

    print("init sorter... ")

    tdc_ns = np.zeros((NUM_CHANNELS, NUM_HITS), dtype=np.float64)
    number_of_hits = np.zeros((NUM_CHANNELS, ), dtype=np.int32)

    sorter.set_tdc_resolution_ns(file.tdc_resolution())
    sorter.set_tdc_array_row_length(NUM_HITS)
    sorter.set_count(number_of_hits)
    sorter.set_tdc_pointer(tdc_ns)

    #sorter.set_use_reflection_filter_on_u1(False) # Achim recommended False
    #sorter.set_use_reflection_filter_on_u2(False)

    if command >= 2:
        sorter.create_scalefactors_calibrator(True,\
                                              sorter.runtime_u,\
                                              sorter.runtime_v,\
                                              sorter.runtime_w, 0.78,\
                                              sorter.fu, sorter.fv, sorter.fw)

    error_code = sorter.init_after_setting_parameters()

    if error_code:
        print("sorter could not be initialized\n")
        error_text = sorter.get_error_text(error_code, 512)
        print('Error %d: %s' % (error_code, error_text))
        sys.exit(0)

    print("Calibration factors:\n  f_U (mm/ns) =%f\n  f_V (mm/ns) =%f\n  f_W (mm/ns) =%f\n  Offset on layer W (ns) =%f\n"%\
          (2*sorter.fu, 2*sorter.fv, 2*sorter.fw, w_offset))

    print("ok for sorter initialization\n")

    create_output_directory(OFPREFIX)

    print("reading event data... \n")

    evnum = 0
    t_sec = time()
    t1_sec = time()
    while file.next_event():
        evnum = file.event_number()

        if evnum < EVSKIP: continue
        if evnum > EVENTS: break

        if gu.do_print(evnum):
            t1 = time()
            print('Event: %06d, dt(sec): %.3f' % (evnum, t1 - t1_sec))
            t1_sec = t1

        # //if (event_counter%10000 == 0) {if (my_kbhit()) break;}

        # //==================================
        # // TODO by end user:
        # // Here you must read in a data block from your data file
        # // and fill the array tdc_ns[][] and number_of_hits[]

        #nhits = np.zeros((NUMBER_OF_CHANNELS,), dtype=np.int32)
        file.get_number_of_hits_array(number_of_hits, maxvalue=NUM_HITS)
        if file.error_flag():
            error_text = file.get_error_text(file.error_flag())
            print("file Error %d: %s" % (file.error_flag(), error_text))
            sys.exit(0)

        if VERBOSE: print('====raw number_of_hits_array', number_of_hits[:])
        #number_of_hits = np.array([n if n<NUM_HITS else NUM_HITS for n in number_of_hits])
        #if VERBOSE : print('   number_of_hits_array constrained ', number_of_hits[:8])

        file.get_tdc_data_array(tdc_ns, maxsize=NUM_HITS)

        if file.error_flag():
            error_text = file.get_error_text(file.error_flag())
            print("file Error %d: %s" % (file.error_flag(), error_text))
            sys.exit(0)

        conds = number_of_hits[:5] == 0
        if conds.any(): continue

        #   	// apply conversion to ns
        #        if False : # file returns tdc_ns already in [ns]
        #            tdc_ns *= file.tdc_resolution()

        #       //==================================
        if sp.PLOT_NHITS:
            sp.lst_nhits_u1.append(number_of_hits[Cu1])
            sp.lst_nhits_u2.append(number_of_hits[Cu2])
            sp.lst_nhits_v1.append(number_of_hits[Cv1])
            sp.lst_nhits_v2.append(number_of_hits[Cv2])
            #sp.lst_nhits_w1 .append(number_of_hits[Cw1])
            #sp.lst_nhits_w2 .append(number_of_hits[Cw2])
            sp.lst_nhits_mcp.append(number_of_hits[Cmcp])

        if sp.PLOT_TIME_CH:
            sp.lst_u1.append(tdc_ns[Cu1, 0])
            sp.lst_u2.append(tdc_ns[Cu2, 0])
            sp.lst_v1.append(tdc_ns[Cv1, 0])
            sp.lst_v2.append(tdc_ns[Cv2, 0])
            #sp.lst_w1 .append(tdc_ns[Cw1,0])
            #sp.lst_w2 .append(tdc_ns[Cw2,0])
            sp.lst_mcp.append(tdc_ns[Cmcp, 0])

        if sp.PLOT_REFLECTIONS:
            if number_of_hits[Cu2] > 1:
                sp.lst_refl_u1.append(tdc_ns[Cu2, 1] - tdc_ns[Cu1, 0])
            if number_of_hits[Cu1] > 1:
                sp.lst_refl_u2.append(tdc_ns[Cu1, 1] - tdc_ns[Cu2, 0])
            if number_of_hits[Cv2] > 1:
                sp.lst_refl_v1.append(tdc_ns[Cv2, 1] - tdc_ns[Cv1, 0])
            if number_of_hits[Cv1] > 1:
                sp.lst_refl_v2.append(tdc_ns[Cv1, 1] - tdc_ns[Cv2, 0])
            #if number_of_hits[Cw2]>1 : sp.lst_refl_w1.append(tdc_ns[Cw2,1] - tdc_ns[Cw1,0])
            #if number_of_hits[Cw1]>1 : sp.lst_refl_w2.append(tdc_ns[Cw1,1] - tdc_ns[Cw2,0])

        #--------- preserve RAW time sums
        #time_sum_u = deepcopy(tdc_ns[Cu1,0] + tdc_ns[Cu2,0] - 2*tdc_ns[Cmcp,0]) #deepcopy(...)
        #time_sum_v = deepcopy(tdc_ns[Cv1,0] + tdc_ns[Cv2,0] - 2*tdc_ns[Cmcp,0])
        time_sum_u = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w = 0  #tdc_ns[Cw1,0] + tdc_ns[Cw2,0] - 2*tdc_ns[Cmcp,0]

        #print("RAW time_sum_u, time_sum_v:", time_sum_u, time_sum_v)
        #---------

        if VERBOSE: print_tdc_ns(tdc_ns, cmt='  TDC raw data ')

        if sorter.use_hex:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v, offset_sum_w)
            #shift layer w so that the middle lines of all layers intersect in one point:
            sorter.shift_layer_w(+1, w_offset)
        else:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v)

        if VERBOSE: print_tdc_ns(tdc_ns, cmt='  TDC after shift_sums ')

        # shift all signals from the anode so that the center of the detector is at x=y=0:
        sorter.shift_position_origin(+1, pos_offset_x, pos_offset_y)
        sorter.feed_calibration_data(
            True, w_offset
        )  # for calibration of fv, fw, w_offset and correction tables

        if VERBOSE:
            print_tdc_ns(tdc_ns, cmt='  TDC after feed_calibration_data ')

        #print('map_is_full_enough', hexanode.py_sorter_scalefactors_calibration_map_is_full_enough(sorter))

        # NOT VALID FOR QUAD
        #sfco = hexanode.py_scalefactors_calibration_class(sorter) # NOT FOR QUAD
        # break loop if statistics is enough
        #if sfco :
        #    if sfco.map_is_full_enough() :
        #         print('sfo.map_is_full_enough(): %s  event number: %06d' % (sfco.map_is_full_enough(), evnum))
        #         break

        #if sp.PLOT_XY_RESOLUTION :
        #    #print("    binx: %d  biny: %d  resolution(FWHM): %.6f" % (sfco.binx, sfco.biny, sfco.detector_map_resol_FWHM_fill))
        #    if sfco.binx>=0 and sfco.biny>=0 :
        #        sp.lst_binx.append(sfco.binx)
        #        sp.lst_biny.append(sfco.biny)
        #        sp.lst_resol_fwhm.append(sfco.detector_map_resol_FWHM_fill)

        # Sort the TDC-Data and reconstruct missing signals and apply the time-sum- and NL-correction.
        # number_of_particles is the number of reconstructed particles

        number_of_particles = sorter.sort() if command == 1 else\
                              sorter.run_without_sorting()

        #file.get_tdc_data_array(tdc_ns, NUM_HITS)
        if VERBOSE:
            print('    (un/)sorted number_of_hits_array', number_of_hits[:8])
        if VERBOSE: print_tdc_ns(tdc_ns, cmt='  TDC sorted data ')
        if VERBOSE:
            print("  Event %5i  number_of_particles: %i" %
                  (evnum, number_of_particles))
            for i in range(number_of_particles):
                #### IT DID NOT WORK ON LCLS2 because pointer was deleted in py_hit_class.__dealloc__
                hco = hexanode.py_hit_class(sorter, i)
                print("    p:%2i x:%7.3f y:%7.3f t:%7.3f met:%d" %
                      (i, hco.x, hco.y, hco.time, hco.method))

        #print_tdc_ns(tdc_ns, cmt='  TDC sorted data ')
        #print('    XXX sorter.time_list', sorter.t_list())

        if sp.PLOT_NHITS:
            sp.lst_nparts.append(number_of_particles)

        # Discards most events when command > 1
        #=====================
        if number_of_particles < 1: continue
        #=====================

        #       // TODO by end user..."

        #---------

        u_ns = tdc_ns[Cu1, 0] - tdc_ns[Cu2, 0]
        v_ns = tdc_ns[Cv1, 0] - tdc_ns[Cv2, 0]
        w_ns = 0  #tdc_ns[Cw1,0] - tdc_ns[Cw2,0]

        u = u_ns * sorter.fu
        v = v_ns * sorter.fv
        w = 0  #(w_ns + w_offset) * sorter.fw

        Xuv = u
        Xuw = 0  #u
        Xvw = 0  #v + w
        Yuv = v  #(u - 2*v)*OSQRT3
        Yuw = 0  #(2*w - u)*OSQRT3
        Yvw = 0  # (w - v)*OSQRT3

        dX = 0  # Xuv - Xvw
        dY = 0  # Yuv - Yvw
        dR = sqrt(dX * dX + dY * dY)

        time_sum_u_corr = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v_corr = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w_corr = 0  #tdc_ns[Cw1,0] + tdc_ns[Cw2,0] - 2*tdc_ns[Cmcp,0]

        #---------

        if sp.PLOT_UVW or sp.PLOT_CORRELATIONS:
            sp.lst_u_ns.append(u_ns)
            sp.lst_v_ns.append(v_ns)
            sp.lst_w_ns.append(w_ns)
            sp.lst_u.append(u)
            sp.lst_v.append(v)
            sp.lst_w.append(w)

        if sp.PLOT_TIME_SUMS or sp.PLOT_CORRELATIONS:
            sp.lst_time_sum_u.append(time_sum_u)
            sp.lst_time_sum_v.append(time_sum_v)
            sp.lst_time_sum_w.append(time_sum_w)

            sp.lst_time_sum_u_corr.append(time_sum_u_corr)
            sp.lst_time_sum_v_corr.append(time_sum_v_corr)
            sp.lst_time_sum_w_corr.append(time_sum_w_corr)

        if sp.PLOT_XY_COMPONENTS:
            sp.lst_Xuv.append(Xuv)
            sp.lst_Xuw.append(Xuw)
            sp.lst_Xvw.append(Xvw)

            sp.lst_Yuv.append(Yuv)
            sp.lst_Yuw.append(Yuw)
            sp.lst_Yvw.append(Yvw)

        hco = hexanode.py_hit_class(sorter, 0)

        if sp.PLOT_MISC:
            sp.list_dr.append(dR)

            # fill Consistence Indicator
            consistenceIndicator = 0
            for (ind, incr) in inds_incr:
                if number_of_hits[ind] > 0: consistenceIndicator += incr
            sp.lst_consist_indicator.append(consistenceIndicator)

            sp.lst_rec_method.append(hco.method)
            #print('reconstruction method %d' % hco.method)

        if sp.PLOT_XY_2D:

            # fill 2-d images
            x1, y1 = hco.x, hco.y

            x2, y2 = (-10, -10)
            if number_of_particles > 1:
                hco2 = hexanode.py_hit_class(sorter, 1)
                x2, y2 = hco2.x, hco2.y

            ix1, ix2, ixuv, ixuw, ixvw = sp.img_x_bins.bin_indexes(
                (x1, x2, Xuv, Xuw, Xvw))
            iy1, iy2, iyuv, iyuw, iyvw = sp.img_y_bins.bin_indexes(
                (y1, y2, Yuv, Yuw, Yvw))

            sp.img_xy_1[iy1, ix1] += 1
            sp.img_xy_2[iy2, ix2] += 1
            sp.img_xy_uv[iyuv, ixuv] += 1
            sp.img_xy_uw[iyuw, ixuw] += 1
            sp.img_xy_vw[iyvw, ixvw] += 1

        if sp.PLOT_PHYSICS:
            if number_of_hits[Cmcp] > 1:
                #t0, t1 = tdc_ns[Cmcp,:2]
                #it0, it1 = sp.t_ns_bins.bin_indexes((t0, t1))
                #sp.t1_vs_t0[it1, it0] += 1

                #ix, iy = sp.x_mm_bins.bin_indexes((Xuv,Yuv))
                #sp.x_vs_t0[ix, it0] += 1
                #sp.y_vs_t0[iy, it0] += 1

                #print("  Event %5i  number_of_particles: %i" % (evnum, number_of_particles))
                #for i in range(number_of_particles) :
                #    hco = hexanode.py_hit_class(sorter, i)
                #    #print("    p:%2i x:%7.3f y:%7.3f t:%7.3f met:%d" % (i, hco.x, hco.y, hco.time, hco.method))
                #    x,y,t = hco.x, hco.y, hco.time
                #    r = sqrt(x*x+y*y)
                #    if x<0 : r=-r
                #    ir = sp.r_mm_bins.bin_indexes((r,))
                #    it = sp.t_ns_bins.bin_indexes((t,))
                #    sp.r_vs_t[ir, it] += 1

                for x, y, r, t in sorter.xyrt_list():
                    irx, iry = sp.r_mm_bins.bin_indexes(
                        (r if x > 0 else -r, r if y > 0 else -r))
                    it = sp.t_ns_bins.bin_indexes((t, ))
                    sp.rsx_vs_t[irx, it] += 1
                    sp.rsy_vs_t[iry, it] += 1

                times = sorter.t_list()
                tinds = sp.t_ns_bins.bin_indexes(times)  # INDEXES SHOULD BE np.array
                #print_ndarr(times, '\n    XXX times')
                #print_ndarr(tinds, '\n    XXX tinds')

                # accumulate times in the list
                for t in times:
                    sp.lst_t_all.append(t)

                # accumulate times directly in histogram to evaluate average
                sp.t_all[tinds] += 1

                # accumulate times in correlation matrix
                for i in tinds:
                    sp.ti_vs_tj[i, tinds] += 1

        # // write the results into a new data file.
        # // the variable "number_of_particles" contains the number of reconstructed particles.
        # // the x and y (in mm) and the TOF (in ns) are stored in the array sorter->output_hit_array:

        # // for the i-th particle (i starts from 0):
        # // hco = hexanode.py_hit_class(sorter, i)
        # // hco.x, hco.y, hco.time

        # // for each particle you can also retrieve the information about how the particle
        # // was reconstructed (to get some measure of the confidence):
        # // hco.method

    # end of the while loop
    print("end of the while loop... \n")

    if command == 2:
        print("sorter.do_calibration()... for command=2")
        sorter.do_calibration()
        print("ok - after do_calibration")

        # QUAD SHOULD NOT USE: scalefactors_calibration_class

        #sfco = hexanode.py_scalefactors_calibration_class(sorter)
        #if sfco :
        #    print("Good calibration factors are:\n  f_U =%f\n  f_V =%f\n  f_W =%f\n  Offset on layer W=%f\n"%\
        #          (2*sorter.fu, 2*sfco.best_fv, 2*sfco.best_fw, sfco.best_w_offset))
        #
        #    print('CALIBRATION: These parameters and time sum offsets from histograms should be set in the file\n  %s' % CALIBCFG)

    if command == 3:  # generate and print correction tables for sum- and position-correction
        CALIBTAB = calibtab if calibtab is not None else\
                   file.make_calib_file_path(type=CTYPE_HEX_TABLE)
        print("creating calibration table in file: %s" % CALIBTAB)
        status = hexanode.py_create_calibration_tables(CALIBTAB.encode(),
                                                       sorter)
        print("CALIBRATION: finished creating calibration tables: status %s" %
              status)

        #=====================
        #sys.exit('TEST EXIT in QuadCalib')
        #=====================

    print("consumed time (sec) = %.6f\n" % (time() - t_sec))

    if sorter is not None: del sorter

    if PLOT_HIS:
        plot_histograms(prefix=OFPREFIX, do_save=SAVE_HIS, hwin_x0y0=(0, 0))
        show()
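
The 2-d images and time histograms above all go through a binning helper (img_x_bins, t_ns_bins, r_mm_bins, ...) whose bin_indexes method maps values to integer bin indexes. That helper is defined elsewhere in the package; a minimal stand-in with equally spaced bins, assuming out-of-range values are clipped into the edge bins, could look like this:

import numpy as np

class EquiBins:
    """Stand-in for the binning helper used above: nbins equal bins on [vmin, vmax)."""
    def __init__(self, vmin, vmax, nbins):
        self.nbins = int(nbins)
        self.edges = np.linspace(vmin, vmax, self.nbins + 1)

    def bin_indexes(self, values):
        inds = np.digitize(np.asarray(values, dtype=np.float64), self.edges) - 1
        return np.clip(inds, 0, self.nbins - 1)  # clip under/overflow into the edge bins

# accumulate a 2-d image the same way as sp.img_xy_uv above
x_bins = EquiBins(-50, 50, 200)
y_bins = EquiBins(-50, 50, 200)
img_xy = np.zeros((y_bins.nbins, x_bins.nbins), dtype=np.int64)
ix = x_bins.bin_indexes((1.2, -3.4))
iy = y_bins.bin_indexes((0.5, 7.8))
img_xy[iy, ix] += 1
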
Example 5
        if sfco:
            if sfco.map_is_full_enough():
                print 'sfco.map_is_full_enough(): %s  event number: %06d' % (
                    sfco.map_is_full_enough(), event_number)
                break

        # Sort the TDC-Data and reconstruct missing signals and apply the sum- and NL-correction.
        # number_of_particles is the number of reconstructed particles
        number_of_particles = sorter.sort() if command == 1 else\
                                  sorter.run_without_sorting()

        if False:
            print "  Event %5i  number_of_particles: %i" % (
                event_number, number_of_particles)
            for i in range(number_of_particles):
                hco = hexanode.py_hit_class(sorter, i)
                print "    p:%1i x:%.3f y:%.3f t:%.3f met:%d" % (
                    i, hco.x, hco.y, hco.time, hco.method)

            u = tdc_ns[sorter.cu1, 0] + tdc_ns[sorter.cu2, 0] - 2 * tdc_ns[sorter.cmcp, 0]
            v = tdc_ns[sorter.cv1, 0] + tdc_ns[sorter.cv2, 0] - 2 * tdc_ns[sorter.cmcp, 0]
            w = tdc_ns[sorter.cw1, 0] + tdc_ns[sorter.cw2, 0] - 2 * tdc_ns[sorter.cmcp, 0]

            print "    part1 u:%.3f v:%.3f w:%.3f" % (u, v, w)

        # // TODO by end user...

        # // write the results into a new data file.
Example 6
def py_sort(**kwargs):

    SRCCHS = kwargs.get('srcchs', {
        'AmoETOF.0:Acqiris.0': (6, 7, 8, 9, 10, 11),
        'AmoITOF.0:Acqiris.0': (0, )
    })
    DSNAME = kwargs.get('dsname', 'exp=xpptut15:run=390:smd')  # or h5 file: 'xpptut15-r0390-e300000-n32-mpi.h5'
    EVSKIP = kwargs.get('evskip', 0)
    EVENTS = kwargs.get('events', 100) + EVSKIP
    NUM_CHANNELS = kwargs.get('numchs', 7)
    NUM_HITS = kwargs.get('numhits', 16)
    CALIBTAB = kwargs.get('calibtab', 'calibration_table_data.txt')
    VERBOSE = kwargs.get('verbose', False)

    tdc_ns = np.zeros((NUM_CHANNELS, NUM_HITS), dtype=np.float64)
    number_of_hits = np.zeros((NUM_CHANNELS, ), dtype=np.int32)

    command = -1

    # The "command"-value is set in the first line of configuration file "sorter_data_cfg.txt"
    # 1 = sort and write new file
    # 2 = calibrate fv, fw, w_offset
    # 3 = create calibration table files

    # create the sorter object:
    sorter = hexanode.py_sort_class()
    fname_cfg = "sorter_data_cfg.txt"
    status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=\
        hexanode.py_read_config_file(fname_cfg, sorter)
    print 'read_config_file status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y=',\
                            status, command, offset_sum_u, offset_sum_v, offset_sum_w, w_offset, pos_offset_x, pos_offset_y

    if not status:
        print "WARNING: can't read config file %s" % fname_cfg
        del sorter
        sys.exit(0)

    print 'use_sum_correction', sorter.use_sum_correction
    print 'use_pos_correction', sorter.use_pos_correction
    if sorter is not None:
        if sorter.use_sum_correction or sorter.use_pos_correction:
            status = hexanode.py_read_calibration_tables(CALIBTAB, sorter)

    if command == -1:
        print "no config file was read. Nothing to do."
        if sorter is not None: del sorter
        sys.exit(0)

    Cu1 = sorter.cu1
    Cu2 = sorter.cu2
    Cv1 = sorter.cv1
    Cv2 = sorter.cv2
    Cw1 = sorter.cw1
    Cw2 = sorter.cw2
    Cmcp = sorter.cmcp
    print "Numeration of channels - u1:%i  u2:%i  v1:%i  v2:%i  w1:%i  w2:%i  mcp:%i"%\
          (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2, Cmcp)

    inds_of_channels = (Cu1, Cu2, Cv1, Cv2, Cw1, Cw2)
    incr_of_consistence = (1, 2, 4, 8, 16, 32)
    inds_incr = zip(inds_of_channels, incr_of_consistence)

    DIO = HexDataIO(srcchs=SRCCHS, numchs=NUM_CHANNELS, numhits=NUM_HITS)

    #=====================
    if '.h5' in DSNAME: DIO.open_input_h5file(DSNAME)
    else:
        DIO.open_input_dataset(DSNAME, pbits=0)

        DIO.set_wf_hit_finder_parameters(**kwargs)
        DIO.print_wf_hit_finder_parameters()
    #=====================

    print 'DIO experiment : %s' % DIO.experiment()
    print 'DIO run        : %s' % DIO.run()
    print 'DIO start time : %s' % DIO.start_time()
    print 'DIO stop time  : %s' % DIO.stop_time()
    print 'DIO tdc_resolution : %.3f' % DIO.tdc_resolution()

    print "init sorter... "

    #sorter.set_tdc_resolution_ns(0.025)
    sorter.set_tdc_resolution_ns(DIO.tdc_resolution())
    sorter.set_tdc_array_row_length(NUM_HITS)
    sorter.set_count(number_of_hits)
    sorter.set_tdc_pointer(tdc_ns)

    #sorter.set_use_reflection_filter_on_u1(False) # Achim recommended False
    #sorter.set_use_reflection_filter_on_u2(False)

    if command >= 2:
        sorter.create_scalefactors_calibrator(True,\
                                              sorter.runtime_u,\
                                              sorter.runtime_v,\
                                              sorter.runtime_w, 0.78,\
                                              sorter.fu, sorter.fv, sorter.fw)

    error_code = sorter.init_after_setting_parameters()
    if error_code:
        print "sorter could not be initialized\n"
        error_text = sorter.get_error_text(error_code, 512)
        print 'Error %d: %s' % (error_code, error_text)
        sys.exit(0)

    print "Calibration factors:\n  f_U (mm/ns) =%f\n  f_V (mm/ns) =%f\n  f_W (mm/ns) =%f\n  Offset on layer W (ns) =%f\n"%\
          (2*sorter.fu, 2*sorter.fv, 2*sorter.fw, w_offset)

    print "ok for sorter initialization\n"

    print "reading event data... \n"

    evnum = 0
    t_sec = time()
    t1_sec = time()
    while DIO.read_next_event():

        evnum = DIO.event_number()

        if evnum < EVSKIP: continue
        if evnum > EVENTS: break

        if do_print(evnum):
            t1 = time()
            print 'Event: %06d, dt(sec): %.3f' % (evnum, t1 - t1_sec)
            t1_sec = t1

        #==================================
        # TODO by end user:
        # Here you must read in a data block from your data file
        # and fill the array tdc_ns[][] and number_of_hits[]

        #nhits = np.zeros((NUMBER_OF_CHANNELS,), dtype=np.int32)
        DIO.get_number_of_hits_array(number_of_hits)
        if DIO.error_flag():
            error_text = DIO.get_error_text(DIO.error_flag())
            print "DIO Error %d: %s" % (DIO.error_flag(), error_text)
            sys.exit(0)
        if VERBOSE: print '   number_of_hits_array', number_of_hits[:8]

        DIO.get_tdc_data_array(tdc_ns)

        if DIO.error_flag():
            error_text = DIO.get_error_text(DIO.error_flag())
            print "DIO Error %d: %s" % (DIO.error_flag(), error_text)
            sys.exit(0)

        if VERBOSE: print '   TDC data:\n', tdc_ns[0:8, 0:5]

        # apply conversion of times to ns
        if False:  # DIO returns tdc_ns already in [ns]
            tdc_ns *= DIO.tdc_resolution()

        #==================================
        # NHITS - number of hits per channel
        if True:
            nhits_u1 = number_of_hits[Cu1]
            nhits_u2 = number_of_hits[Cu2]
            nhits_v1 = number_of_hits[Cv1]
            nhits_v2 = number_of_hits[Cv2]
            nhits_w1 = number_of_hits[Cw1]
            nhits_w2 = number_of_hits[Cw2]
            nhits_mcp = number_of_hits[Cmcp]

        # TIME_CH - time of the 1-st hit
        if True:
            t0_u1 = tdc_ns[Cu1, 0]
            t0_u2 = tdc_ns[Cu2, 0]
            t0_v1 = tdc_ns[Cv1, 0]
            t0_v2 = tdc_ns[Cv2, 0]
            t0_w1 = tdc_ns[Cw1, 0]
            t0_w2 = tdc_ns[Cw2, 0]
            t0_mcp = tdc_ns[Cmcp, 0]

        # REFLECTIONS
        if True:
            if number_of_hits[Cu2] > 1:
                refl_u1 = tdc_ns[Cu2, 1] - tdc_ns[Cu1, 0]
            if number_of_hits[Cu1] > 1:
                refl_u2 = tdc_ns[Cu1, 1] - tdc_ns[Cu2, 0]
            if number_of_hits[Cv2] > 1:
                refl_v1 = tdc_ns[Cv2, 1] - tdc_ns[Cv1, 0]
            if number_of_hits[Cv1] > 1:
                refl_v2 = tdc_ns[Cv1, 1] - tdc_ns[Cv2, 0]
            if number_of_hits[Cw2] > 1:
                refl_w1 = tdc_ns[Cw2, 1] - tdc_ns[Cw1, 0]
            if number_of_hits[Cw1] > 1:
                refl_w2 = tdc_ns[Cw1, 1] - tdc_ns[Cw2, 0]

        # TIME_SUMS
        time_sum_u = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w = tdc_ns[Cw1, 0] + tdc_ns[Cw2, 0] - 2 * tdc_ns[Cmcp, 0]

        # UVW
        u_ns = tdc_ns[Cu1, 0] - tdc_ns[Cu2, 0]
        v_ns = tdc_ns[Cv1, 0] - tdc_ns[Cv2, 0]
        w_ns = tdc_ns[Cw1, 0] - tdc_ns[Cw2, 0]

        u = u_ns * sorter.fu
        v = v_ns * sorter.fv
        w = (w_ns + w_offset) * sorter.fw

        Xuv = u
        Xuw = u
        Xvw = v + w
        Yuv = (u - 2 * v) * OSQRT3
        Yuw = (2 * w - u) * OSQRT3
        Yvw = (w - v) * OSQRT3

        dX = Xuv - Xvw
        dY = Yuv - Yvw
        Deviation = sqrt(dX * dX + dY * dY)

        if sorter.use_hex:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v, offset_sum_w)
            #shift layer w so that the middle lines of all layers intersect in one point:
            sorter.shift_layer_w(+1, w_offset)
        else:
            # shift the time sums to zero:
            sorter.shift_sums(+1, offset_sum_u, offset_sum_v)

        # shift all signals from the anode so that the center of the detector is at x=y=0:
        sorter.shift_position_origin(+1, pos_offset_x, pos_offset_y)

        sorter.feed_calibration_data(
            True, w_offset
        )  # for calibration of fv, fw, w_offset and correction tables

        #DIO.get_tdc_data_array(tdc_ns)

        time_sum_u_corr = tdc_ns[Cu1, 0] + tdc_ns[Cu2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_v_corr = tdc_ns[Cv1, 0] + tdc_ns[Cv2, 0] - 2 * tdc_ns[Cmcp, 0]
        time_sum_w_corr = tdc_ns[Cw1, 0] + tdc_ns[Cw2, 0] - 2 * tdc_ns[Cmcp, 0]

        #print 'map_is_full_enough', hexanode.py_sorter_scalefactors_calibration_map_is_full_enough(sorter)
        sfco = hexanode.py_scalefactors_calibration_class(sorter)

        # break loop if statistics is enough
        if sfco:
            if sfco.map_is_full_enough():
                print 'sfco.map_is_full_enough(): %s  event number: %06d' % (
                    sfco.map_is_full_enough(), evnum)
                break

        # XY_RESOLUTION :
        if True:
            #print "    binx: %d  biny: %d  resolution(FWHM): %.6f" % (sfco.binx, sfco.biny, sfco.detector_map_resol_FWHM_fill)
            if sfco.binx >= 0 and sfco.biny >= 0:
                binx = sfco.binx
                biny = sfco.biny
                resol_fwhm = sfco.detector_map_resol_FWHM_fill

        # Sort the TDC-Data and reconstruct missing signals and apply the sum- and NL-correction.
        # number_of_particles is the number of reconstructed particles
        #========================================================
        number_of_particles = sorter.sort() if command == 1 else\
                                  sorter.run_without_sorting()
        #========================================================

        if True:
            print "  Event %5i  number_of_particles: %i" % (
                evnum, number_of_particles)
            for i in range(number_of_particles):
                hco = hexanode.py_hit_class(sorter, i)
                print "    p:%1i x:%.3f y:%.3f t:%.3f met:%d" % (
                    i, hco.x, hco.y, hco.time, hco.method)
            print "    part1 u:%.3f v:%.3f w:%.3f" % (u, v, w)

        #-------------------------
        # TODO by the end user..."

        if number_of_particles < 1: continue

        hco = hexanode.py_hit_class(sorter, 0)

        # MISC
        if False:
            # fill Consistence Indicator
            consistenceIndicator = 0
            for (ind, incr) in inds_incr:
                if number_of_hits[ind] > 0: consistenceIndicator += incr
            consist_indicator = consistenceIndicator

            rec_method = hco.method
            #print 'reconstruction method %d' % hco.method

        # XY_2D :
        if False:
            # fill 2-d images
            x1, y1 = hco.x, hco.y

            x2, y2 = (-10, -10)
            if number_of_particles > 1:
                hco2 = hexanode.py_hit_class(sorter, 1)
                x2, y2 = hco2.x, hco2.y

            ix1, ix2, ixuv, ixuw, ixvw = img_x_bins.bin_indexes(
                (x1, x2, Xuv, Xuw, Xvw))
            iy1, iy2, iyuv, iyuw, iyvw = img_y_bins.bin_indexes(
                (y1, y2, Yuv, Yuw, Yvw))

            img_xy_1[iy1, ix1] += 1
            img_xy_2[iy2, ix2] += 1
            img_xy_uv[iyuv, ixuv] += 1
            img_xy_uw[iyuw, ixuw] += 1
            img_xy_vw[iyvw, ixvw] += 1

        # PHYSICS :
        if False:
            if number_of_hits[Cmcp] > 1:
                t0, t1 = tdc_ns[Cmcp, :2]
                it0, it1 = t_ns_bins.bin_indexes((t0, t1))
                t1_vs_t0[it1, it0] += 1

                ix, iy = x_mm_bins.bin_indexes((Xuv, Yuv))
                #iy = y_mm_bins.bin_indexes((Yuv,))
                x_vs_t0[ix, it0] += 1
                y_vs_t0[iy, it0] += 1

        # // write the results into a new data file.
        # // the variable "number_of_particles" contains the number of reconstructed particles.
        # // the x and y (in mm) and the TOF (in ns) are stored in the array sorter->output_hit_array:

        # // for the i-th particle (i starts from 0):
        # // hco = hexanode.py_hit_class(sorter, i)
        # // hco.x, hco.y, hco.time

        # // for each particle you can also retrieve the information about how the particle
        # // was reconstructed (to get some measure of the confidence):
        # // hco.method

    # end of the event loop

    if command == 2:
        print "calibrating detector... "
        sorter.do_calibration()
        print "ok - after do_calibration"
        sfco = hexanode.py_scalefactors_calibration_class(sorter)
        if sfco:
            print "Good calibration factors are:\n  f_U =%f\n  f_V =%f\n  f_W =%f\n  Offset on layer W=%f\n"%\
                  (2*sorter.fu, 2*sfco.best_fv, 2*sfco.best_fw, sfco.best_w_offset)

    if command == 3:  # generate and print correction tables for sum- and position-correction
        print "creating calibration tables..."
        status = hexanode.py_create_calibration_tables(CALIBTAB, sorter)
        print "finished creating calibration tables: %s status %s" % (CALIBTAB,
                                                                      status)

    print "consumed time (sec) = %.6f\n" % (time() - t_sec)

    if sorter is not None: del sorter
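
All of the event loops above share the same contract, repeated in the "TODO by end user" comments: per event the caller fills number_of_hits[] and the pre-allocated tdc_ns[][] arrays that the sorter was pointed at via set_count and set_tdc_pointer, then calls sort() or run_without_sorting(). A minimal synthetic filler that honors that contract; the random data below is purely illustrative:

import numpy as np

NUM_CHANNELS, NUM_HITS = 7, 16
tdc_ns = np.zeros((NUM_CHANNELS, NUM_HITS), dtype=np.float64)
number_of_hits = np.zeros((NUM_CHANNELS,), dtype=np.int32)

def fill_event_arrays(rng):
    """Overwrite tdc_ns and number_of_hits in place; the sorter keeps
    pointers to these arrays, so they must not be re-allocated per event."""
    tdc_ns[:] = 0.
    number_of_hits[:] = rng.integers(1, 4, size=NUM_CHANNELS)
    for ch in range(NUM_CHANNELS):
        n = number_of_hits[ch]
        tdc_ns[ch, :n] = np.sort(rng.uniform(0., 1000., size=n))

rng = np.random.default_rng(0)
fill_event_arrays(rng)  # one synthetic "event"
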