Beispiel #1
0
def issue_2021_02_03():
    """Data access example for confluence:
    run, step, event loops over a psana DataSource.
    """
    import sys

    class Arguments:
        # hard-coded parameters of the example
        expt = 'ueddaq02'      # experiment name
        run = 66               # run number
        evtmax = 5             # max number of events to process
        detname = 'epixquad'   # detector name

    args = Arguments()

    from psana.detector.NDArrUtils import info_ndarr
    from psana import DataSource
    ds = DataSource(exp=args.expt,
                    run=args.run,
                    dir=f'/cds/data/psdm/{args.expt[:3]}/{args.expt}/xtc')

    for irun, run in enumerate(ds.runs()):
        print('\n==== %02d run: %d exp: %s detnames: %s' %
              (irun, run.runnum, run.expt, ','.join(run.detnames)))

        print('make %s detector object' % args.detname)
        det = run.Detector(args.detname)

        for istep, step in enumerate(run.steps()):
            print('\nStep %1d' % istep)

            for ievt, evt in enumerate(step.events()):
                if ievt > args.evtmax:
                    # sys.exit is preferable to the site-module builtin exit()
                    sys.exit('exit by number of events limit %d' % args.evtmax)

                print('%s\nEvent %04d' % (80 * '_', ievt))
                segs = det.raw._segment_numbers(evt)
                raw = det.raw.raw(evt)

                # fixed typo in label: 'segsments' -> 'segments'
                print(info_ndarr(segs, 'segments '))
                print(info_ndarr(raw, 'raw '))
Beispiel #2
0
def plot_image(nda, tit=''):
    """Displays an averaged image built from array *nda* with title *tit*.

    The array is reshaped to 2-d and shown via UtilsGraphics with the
    intensity range clipped to the [0.01, 0.99] quantiles.
    """
    from psana.detector.UtilsGraphics import gr

    img = reshape_to_2d(nda)
    if img is None:
        print('plot_image - image "%s" is not available.' % tit)
        return

    logger.info(info_ndarr(img, 'plot_image of %s' % tit))

    # clip the display range to the 1%/99% quantiles to suppress outliers
    # NOTE(review): 'interpolation=' is deprecated in numpy>=1.22 (renamed
    # 'method='); kept here for compatibility with older numpy — confirm.
    vmin, vmax = [np.quantile(img, q, interpolation=itp)
                  for q, itp in ((0.01, 'lower'), (0.99, 'higher'))]
    gr.plotImageLarge(img, amp_range=(vmin, vmax), title=tit)
    gr.show()
Beispiel #3
0
    def add_event(self, raw, irec):
        """Accumulates per-pixel statistics for one event array *raw*.

        Updates in place: sums of 1/v/v**2 for in-gate pixels, counters of
        pixels below/above the gate, and running per-pixel min/max.
        *irec* is the record index, used for the debug message only.
        """
        logger.debug(info_ndarr(raw, 'add_event %3d raw' % irec))

        below = raw < self.gate_lo
        above = raw > self.gate_hi
        ingate = (np.logical_not(np.logical_or(below, above)), )

        vals = raw.astype(np.float64)

        # sums of 1, v, v^2 for pixels inside [gate_lo, gate_hi]
        self.arr_sum0 += np.select(ingate, (self.arr1u64, ), 0)
        self.arr_sum1 += np.select(ingate, (vals, ), 0)
        self.arr_sum2 += np.select(ingate, (np.square(vals), ), 0)

        # counters of pixels falling below/above the gate
        self.sta_int_lo += np.select((below, ), (self.arr1u64, ), 0)
        self.sta_int_hi += np.select((above, ), (self.arr1u64, ), 0)

        # running per-pixel extrema, updated in place
        np.maximum(self.arr_max, raw, out=self.arr_max)
        np.minimum(self.arr_min, raw, out=self.arr_min)
Beispiel #4
0
def test_mask_select(tname, mo):
    """Builds and returns a mask selected by test number *tname* using the
    mask object *mo*; logs the resulting array. Returns None for unknown
    test names.
    """
    mask = None

    if tname == '9':
        import psana.pyalgos.generic.NDArrGenerators as ag
        # random array shaped like the status-derived mask, e.g. (4, 352, 384)
        sh = mo.mask_from_status().shape
        mask = ag.random_standard(shape=sh, mu=0, sigma=0.25, dtype=float)

    elif tname == '1':
        # defaults: status_bits=0xffff, gain_range_inds=(0,1,2,3,4), dtype=DTYPE_MASK
        mask = mo.mask_from_status()

    elif tname == '2':
        mask = mo.mask_neighbors(mo.mask_from_status(), rad=9, ptrn='r')

    elif tname == '3':
        mask = mo.mask_edges(width=0, edge_rows=10, edge_cols=5)

    elif tname == '4':
        mask = mo.mask_center(wcenter=0, center_rows=5, center_cols=3)

    elif tname == '5':
        mask = mo.mask_calib_or_default()

    elif tname == '6':
        mask = test_umask(mo)

    elif tname == '7':
        mask = mo.mask_comb(
            status=True, status_bits=0xffff, gain_range_inds=(0,1,2,3,4),
            neighbors=True, rad=5, ptrn='r',
            edges=True, width=0, edge_rows=10, edge_cols=5,
            center=True, wcenter=0, center_rows=5, center_cols=3,
            calib=True,
            umask=test_umask(mo),
            force_update=False)

    logger.info(info_ndarr(mask, 'mask'))
    return mask
Beispiel #5
0
def status_as_mask(status, status_bits=0xffff, dtype=DTYPE_MASK, **kwa):
    """Converts pixel_status calibration constants to a per-pixel mask.

       Parameters

       - status : np.array - pixel_status calibration constants
       - status_bits : bitword selecting which status bits mark a bad pixel
       - dtype : numpy dtype of the returned mask
       - **kwa : unused, accepted for interface compatibility

       Returns

       - np.array of *dtype* with 1 for good pixels (no selected status bits
         set) and 0 for bad ones, or None if *status* is not an ndarray.
    """
    if not isinstance(status, np.ndarray):
        logger.debug('status is not np.ndarray - return None')
        return None

    from psana.detector.NDArrUtils import info_ndarr
    logger.debug(info_ndarr(status, 'status'))

    bad = (status & status_bits) > 0
    return np.where(bad, 0, 1).astype(dtype)
Beispiel #6
0
def issue_2022_02_08():
    """test copy xtc2 file to .../public01/xtc/
    cd /cds/data/psdm/prj/public01/xtc/
    cp /cds/data/psdm/tmo/tmoc00318/xtc/tmoc00318-r0010-s000-c000.xtc2 .
    sudo chown psdatmgr tmoc00318-r0010-s000-c000.xtc2
    the same for smalldata/
    """
    from time import time
    from psana import DataSource
    from psana.detector.NDArrUtils import info_ndarr

    ds = DataSource(exp='tmoc00318', run=10, dir='/cds/data/psdm/prj/public01/xtc')
    orun = next(ds.runs())
    det = orun.Detector('epix100')

    # time det.raw.calib for the first ~20 events
    for i, evt in enumerate(orun.events()):
        if i > 20:
            break
        t0_sec = time()
        # measured dt=0.02/0.036/0.049/0.016/0.90 sec for cmpars None or 0/1/2/4/7
        arr = det.raw.calib(evt, cmpars=(0, 7, 100, 10))
        print(info_ndarr(arr, 'Ev.%3d dt=%.3f sec  det.raw.calib(evt, cmpars=(0,7,100,10)): '%(i, time()-t0_sec)))
Beispiel #7
0
def issue_2022_03_02():
    """epix100 default geometry implementation
    """
    from psana.pscalib.geometry.GeometryAccess import GeometryAccess
    from psana.detector.NDArrUtils import info_ndarr
    from psana import DataSource

    ds = DataSource(exp='rixx45619', run=119)
    orun = next(ds.runs())
    det = orun.Detector('epixhr')

    for nevt, evt in enumerate(orun.events()):
        # detector default geometry description as text
        geotxt = det.raw._det_geotxt_default()
        print('_det_geotxt_default:\n%s' % geotxt)
        geo = GeometryAccess()
        geo.load_pars_from_str(geotxt)
        x, y, z = geo.get_pixel_coords()
        print(info_ndarr(x, 'x:'))
        # stop after the first event for which an image can be formed
        if det.raw.image(evt) is not None:
            break
Beispiel #8
0
def issue_2021_03_13_full():
    """Demonstrates that np.median and np.quantile return values rounded to
    int for uint16 input, using a simulated dark-data block.
    """
    from time import time
    import numpy as np
    from psana.detector.NDArrUtils import info_ndarr

    mu, sigma = 32, 3
    # simulated gaussian dark data, shape (nevents, rows, cols)
    arr3d = mu + sigma * np.random.standard_normal(
        size=(100, 352, 384)).astype(dtype=np.float64)
    print(info_ndarr(arr3d, 'arr3d simulated ', last=20))

    arr3d = arr3d.astype(np.uint16)
    print(info_ndarr(arr3d, 'arr3d dtype u16 ', last=20))

    fraclo, frachi = 0.05, 0.95

    t0_sec = time()
    arr_med = np.quantile(arr3d, 0.5, axis=0, interpolation='linear')
    print('median/quantile(0.5) time = %.3f sec' % (time() - t0_sec))
    arr_qlo = np.quantile(arr3d, fraclo, axis=0, interpolation='linear')
    arr_qhi = np.quantile(arr3d, frachi, axis=0, interpolation='linear')
    # per-event deviation from the per-pixel median and its median abs value
    arr_dev_3d = arr3d[:, ] - arr_med
    arr_abs_dev = np.median(np.abs(arr_dev_3d), axis=0)

    for nda, label in ((arr_med, 'arr_med '), (arr_qlo, 'arr_qlo '),
                       (arr_qhi, 'arr_qhi '), (arr_abs_dev, 'arr_abs_dev ')):
        print(info_ndarr(nda, label, last=20))

    med_med = np.median(arr_med)
    med_qlo = np.median(arr_qlo)
    med_qhi = np.median(arr_qhi)
    med_abs_dev = np.median(arr_abs_dev)

    print('\n'.join((
        'Pre-processing time %.3f sec' % (time()-t0_sec),
        'Results for median over pixels intensities:',
        '    %.3f fraction of the event spectrum is below %.3f ADU - pedestal estimator' % (0.5, med_med),
        '    %.3f fraction of the event spectrum is below %.3f ADU - gate low limit' % (fraclo, med_qlo),
        '    %.3f fraction of the event spectrum is below %.3f ADU - gate upper limit' % (frachi, med_qhi),
        '    event spectrum spread    median(abs(raw-med)): %.3f ADU - spectral peak width estimator' % med_abs_dev)))
Beispiel #9
0
def proc_block(block, **kwa):
    """Dark data 1st stage processing to define gate limits.

       block.shape = (nrecs, <raw-detector-shape>),
       where <raw-detector-shape> can be per segment (352, 384) or per detector (nsegs, 352, 384)

       Keyword arguments (all optional)
       - exp, det : experiment/detector names, used for logging only
       - int_lo, int_hi : lowest/highest intensity accepted for dark evaluation
       - fraclo, frachi : fractions of statistics below the low/high gate limits

       Returns segment/detector shaped arrays of gate_lo, gate_hi, arr_med, arr_abs_dev
    """
    exp = kwa.get('exp', None)
    detname = kwa.get('det', None)
    int_lo = kwa.get('int_lo',
                     1)  # lowest  intensity accepted for dark evaluation
    int_hi = kwa.get('int_hi',
                     16000)  # highest intensity accepted for dark evaluation
    #intnlo     = kwa.get('intnlo', 6.0)     # intensity ditribution number-of-sigmas low
    #intnhi     = kwa.get('intnhi', 6.0)     # intensity ditribution number-of-sigmas high
    #rms_lo     = kwa.get('rms_lo', 0.001)   # rms ditribution low
    #rms_hi     = kwa.get('rms_hi', 16000)   # rms ditribution high
    #rmsnlo     = kwa.get('rmsnlo', 6.0)     # rms ditribution number-of-sigmas low
    #rmsnhi     = kwa.get('rmsnhi', 6.0)     # rms ditribution number-of-sigmas high
    #fraclm     = kwa.get('fraclm', 0.1)     # allowed fraction limit
    fraclo = kwa.get('fraclo',
                     0.05)  # fraction of statistics below low gate limit
    frachi = kwa.get('frachi',
                     0.95)  # fraction of statistics below high gate limit
    frac05 = 0.5  # fraction for the median
    #nrecs1     = kwa.get('nrecs1', None)    # number of records for the 1st stage processing

    logger.debug('in proc_dark_block for exp=%s det=%s, block.shape=%s' %
                 (exp, detname, str(block.shape)))
    logger.info(
        info_ndarr(block,
                   'begin processing of the data block',
                   first=100,
                   last=105))

    t0_sec = time()

    #nrecs1, ny, nx = block.shape[0]
    nrecs1 = block.shape[0]  # number of records (events) in the block
    shape = block.shape[1:]  #(ny, nx) - per-pixel shape of all results
    #if nrecs1 is None or nrecs1>nrecs: nrecs1 = nrecs

    arr1_u16 = np.ones(shape, dtype=np.uint16)
    arr1 = np.ones(shape, dtype=np.uint64)

    t1_sec = time()
    """
    NOTE:
    - our data is uint16.
    - np.median(block, axis=0) or np.quantile(...,interpolation='linear') return result rounded to int
    - in order to return interpolated float values apply the trick:
      data_block + random [0,1)-0.5
    - this would distort data in the range [-0.5,+0.5) ADU, but would allow
      to get better interpolation for median and quantile values
    - use nrecs1 (< nrecs) due to memory and time consumption
    """
    #blockf64 = np.random.random(block.shape) - 0.5 + block
    #logger.debug(info_ndarr(blockf64, '1-st stage conversion uint16 to float64,'\
    #                                 +' add random [0,1)-0.5 time = %.3f sec'%\
    #                                  (time()-t1_sec), first=100, last=105))

    # randomization trick above is disabled - the integer block is used as-is
    blockf64 = block
    #arr_med = np.median(block, axis=0)
    arr_med = np.quantile(blockf64, frac05, axis=0, interpolation='linear')
    arr_qlo = np.quantile(blockf64, fraclo, axis=0, interpolation='lower')
    arr_qhi = np.quantile(blockf64, frachi, axis=0, interpolation='higher')

    logger.debug(
        'block array median/quantile(frac) for med, qlo, qhi time = %.3f sec' %
        (time() - t1_sec))

    # scalar medians over pixels, reported in the summary below
    med_med = np.median(arr_med)
    med_qlo = np.median(arr_qlo)
    med_qhi = np.median(arr_qhi)

    # per-event deviation from the per-pixel median; median(abs(dev)) is a
    # robust estimator of the spectral peak width
    arr_dev_3d = block[:, ] - arr_med  # .astype(dtype=np.float64)
    arr_abs_dev = np.median(np.abs(arr_dev_3d), axis=0)
    med_abs_dev = np.median(arr_abs_dev)

    s = 'proc_block pre-processing time %.3f sec' % (time()-t0_sec)\
      + '\n    results for median over pixels intensities:'\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - pedestal estimator' % (frac05, med_med)\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - gate low limit' % (fraclo, med_qlo)\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - gate upper limit' % (frachi, med_qhi)\
      + '\n    event spectrum spread    median(abs(raw-med)): %.3f ADU - spectral peak width estimator' % med_abs_dev
    logger.info(s)

    # clip quantile-based gates to [int_lo, int_hi] and cast to block dtype
    gate_lo = arr1_u16 * int_lo
    gate_hi = arr1_u16 * int_hi

    gate_lo = np.maximum(np.floor(arr_qlo), gate_lo).astype(dtype=block.dtype)
    gate_hi = np.minimum(np.ceil(arr_qhi), gate_hi).astype(dtype=block.dtype)
    cond = gate_hi > gate_lo
    gate_hi[np.logical_not(cond)] += 1  # guarantee gate_hi > gate_lo everywhere

    logger.debug('proc_block results'\
                +info_ndarr(arr_med,     '\n    arr_med[100:105]', first=100, last=105)\
                +info_ndarr(arr_abs_dev, '\n    abs_dev[100:105]', first=100, last=105)\
                +info_ndarr(gate_lo,     '\n    gate_lo[100:105]', first=100, last=105)\
                +info_ndarr(gate_hi,     '\n    gate_hi[100:105]', first=100, last=105))
    #+info_ndarr(arr_qlo,     '\n    arr_qlo[100:105]', first=100, last=105)\
    #+info_ndarr(arr_qhi,     '\n    arr_qhi[100:105]', first=100, last=105)\

    return gate_lo, gate_hi, arr_med, arr_abs_dev
Beispiel #10
0
def cbits_config_epix10ka(cob, shape=(352, 384)):
    """Creates array of the segment control bits for epix10ka shape=(352, 384)
    from cob=det.raw._seg_configs()[<seg-ind>].config object.
    Returns per panel 4-bit pixel config array with bit assignment:
          0001 = 1<<0 = 1 - T test bit
          0010 = 1<<1 = 2 - M mask bit
          0100 = 1<<2 = 4 - g  gain bit
          1000 = 1<<3 = 8 - ga gain bit
          # add trbit
          010000 = 1<<4 = 16 - trbit

    Parameters
    ----------
    cob : container.Container object
        segment configuration object det.raw._seg_configs()[<seg-ind>].config
        Contains:
        cob.asicPixelConfig: shape:(4, 178, 192) size:136704 dtype:uint8 [12 12 12 12 12...]
        cob.trbit: [1 1 1 1]

    Returns
    -------
    xxxx: np.array, dtype:uint8, ndim=2, shape=(352, 384)
    """
    trbits = cob.trbit  # [1 1 1 1] - per-ASIC trbit values
    pca = cob.asicPixelConfig  # [:,:176,:] - fixed in daq # shape:(4, 176, 192) size:135168 dtype:uint8 [8 8 8 8 8...]
    logger.debug(
        info_ndarr(cob.asicPixelConfig,
                   'trbits: %s asicPixelConfig:' % str(trbits)))
    # half-panel sizes - ASIC quadrant boundaries within the panel array
    rowsh, colsh = int(shape[0] / 2), int(shape[1] /
                                          2)  # should be 176, 192 for epix10ka

    #t0_sec = time()

    # begin to create array of control bits
    # Origin of ASICs in bottom-right corner, so
    # stack them in upside-down matrix and rotate it by 180 deg.

    #cbits = np.flipud(np.fliplr(np.vstack((np.hstack((pca[2],pca[1])),
    #                                       np.hstack((pca[3],pca[0])))))) # 0.000090 sec

    # top half: ASICs 2 and 1 flipped in both axes; bottom half: ASICs 3 and 0
    cbits = np.vstack((np.hstack(
        (np.flipud(np.fliplr(pca[2])), np.flipud(np.fliplr(pca[1])))),
                       np.hstack((pca[3], pca[0]))))

    # keep only the two gain bits (g, ga)
    #cbits = np.bitwise_and(cbits,12) # 0o14 (bin:1100) # 0.000202 sec
    np.bitwise_and(cbits, 12, out=cbits)  # 0o14 (bin:1100) # 0.000135 sec

    #logger.debug('TIME for cbits composition = %.6f sec' % (time()-t0_sec))
    #logger.debug(info_ndarr(cbits,'cbits:'))
    #exit('TEST EXIT')

    if all(trbits):
        cbits = np.bitwise_or(cbits,
                              B04)  # add trbit for all pixels (352, 384)
    elif not any(trbits):
        return cbits
    else:  # set trbit per ASIC
        # quadrant mapping: ASIC 2 -> rows[:rowsh], cols[:colsh]; ASIC 3 ->
        # rows[rowsh:], cols[:colsh]; ASIC 0 -> rows[rowsh:], cols[colsh:];
        # ASIC 1 -> rows[:rowsh], cols[colsh:]
        if trbits[2]:
            np.bitwise_or(cbits[:rowsh, :colsh],
                          B04,
                          out=cbits[:rowsh, :colsh])
        if trbits[3]:
            np.bitwise_or(cbits[rowsh:, :colsh],
                          B04,
                          out=cbits[rowsh:, :colsh])
        if trbits[0]:
            np.bitwise_or(cbits[rowsh:, colsh:],
                          B04,
                          out=cbits[rowsh:, colsh:])
        if trbits[1]:
            np.bitwise_or(cbits[:rowsh, colsh:],
                          B04,
                          out=cbits[:rowsh, colsh:])  #0.000189 sec
    return cbits
Beispiel #11
0
def calib_epix10ka_any(det_raw, evt, cmpars=None, **kwa):  #cmpars=(7,2,100)):
    """Returns calibrated epix10ka data array for the event.

    Algorithm
    ---------
    - gets constants
    - gets raw data
    - evaluates (code - pedestal - offset)
    - applys common mode correction if turned on
    - apply gain factor

    Parameters
    ----------
    - det_raw (psana.Detector.raw) - Detector.raw object
    - evt (psana.Event)    - Event object
    - cmpars (tuple) - common mode parameters
          = None - use pars from calib directory
          = cmpars=(<alg>, <mode>, <maxcorr>)
            alg is not used
            mode =0-correction is not applied, =1-in rows, =2-in cols-WORKS THE BEST
            i.e: cmpars=(7,0,100) or (7,2,100)
    - **kwa - used here and passed to det_raw.mask_comb
      - nda_raw - substitute for det_raw.raw(evt)
      - mbits - parameter of the det_raw.mask_comb(...)
      - mask - user defined mask passed as optional parameter

    Returns
    -------
      - calibrated epix10ka data, or None if raw data or constants are missing
    """

    logger.debug('in calib_epix10ka_any')

    t0_sec_tot = time()

    nda_raw = kwa.get('nda_raw', None)
    raw = det_raw.raw(
        evt
    ) if nda_raw is None else nda_raw  # shape:(352, 384) or suppose to be later (<nsegs>, 352, 384) dtype:uint16
    if raw is None: return None

    _cmpars = det_raw._common_mode() if cmpars is None else cmpars

    gain = det_raw._gain()  # - 4d gains  (7, <nsegs>, 352, 384)
    peds = det_raw._pedestals()  # - 4d pedestals
    if gain is None: return None  # gain = np.ones_like(peds)  # - 4d gains
    if peds is None: return None  # peds = np.zeros_like(peds) # - 4d gains

    # per-detector cache of derived constants, created once per detector name
    store = dic_store.get(det_raw._det_name, None)

    if store is None:

        logger.info('create new store for %s' % det_raw._det_name)
        store = dic_store[det_raw._det_name] = Storage()

        # do ONCE this initialization
        logger.debug(info_ndarr(raw,  '\n  raw ')\
                    +info_ndarr(gain, '\n  gain')\
                    +info_ndarr(peds, '\n  peds'))

        # gfac = 1/gain with protection against division by zero
        store.gfac = divide_protected(np.ones_like(gain), gain)
        store.arr1 = np.ones_like(raw, dtype=np.int8)

        logger.debug(info_ndarr(store.gfac, '\n  gfac '))

        # 'FH','FM','FL','AHL-H','AML-M','AHL-L','AML-L'
        #store.gf4 = np.ones_like(raw, dtype=np.int32) * 0.25 # 0.3333 # M - perefierial
        #store.gf6 = np.ones_like(raw, dtype=np.int32) * 1    # L - center

    gfac = store.gfac

    #if store.dcfg is None: store.dcfg = det_raw._config_object() #config_object_det_raw(det_raw)

    gmaps = gain_maps_epix10ka_any(det_raw,
                                   evt)  #tuple: 7 x shape:(4, 352, 384)
    if gmaps is None: return None

    # select per-pixel gain factor and pedestal by the event's gain mode
    factor = np.select(gmaps,\
                       (gfac[0,:], gfac[1,:], gfac[2,:], gfac[3,:],\
                        gfac[4,:], gfac[5,:], gfac[6,:]), default=1) # 2msec

    pedest = np.select(gmaps,\
                       (peds[0,:], peds[1,:], peds[2,:], peds[3,:],\
                        peds[4,:], peds[5,:], peds[6,:]), default=0)

    #factor, pedest = test_event_constants_for_gmaps(det_raw, evt, gfac, peds) # 6msec
    #factor, pedest = test_event_constants_for_grinds(det_raw, evt, gfac, peds) # 12msec

    store.counter += 1
    # log gain-mode statistics once per 100 events
    if not store.counter % 100:
        logger.debug(info_gain_mode_arrays(gmaps))
        logger.debug(info_pixel_gain_mode_statistics(gmaps))

    logger.debug('TOTAL consumed time (sec = %.6f' % (time() - t0_sec_tot))

    # mask off gain bits from raw data and subtract pedestal
    arrf = np.array(raw & det_raw._data_bit_mask, dtype=np.float32) - pedest

    logger.debug('common-mode correction parameters cmpars: %s' % str(_cmpars))

    if store.mask is None:
        #        mbits = kwa.pop('mbits',1) # 1-mask from status, etc.
        #        mask = det_raw._mask_comb(mbits=mbits, **kwa) if mbits > 0 else None
        #        mask_opt = kwa.get('mask',None) # mask optional parameter in det_raw.calib(...,mask=...)
        #        store.mask = mask if mask_opt is None else mask_opt if mask is None else merge_masks(mask,mask_opt)
        store.mask = det_raw._mask_from_status(**kwa)

    mask = store.mask if store.mask is not None else np.ones_like(
        raw, dtype=DTYPE_MASK)

    #logger.debug(info_ndarr(arrf,  'arrf:'))
    #logger.debug(info_ndarr(mask,  'mask:'))

    if _cmpars is not None:
        alg, mode, cormax = int(_cmpars[0]), int(_cmpars[1]), _cmpars[2]
        npixmin = _cmpars[3] if len(_cmpars) > 3 else 10
        if mode > 0:
            t0_sec_cm = time()
            arr1 = store.arr1  # np.ones_like(mask, dtype=np.uint8)
            gr0, gr1, gr2, gr3, gr4, gr5, gr6 = gmaps
            # for alg 7 apply common mode only to pixels in H/M gain modes
            grhm = np.select(
                (gr0, gr1, gr3, gr4),
                (arr1, arr1, arr1, arr1), default=0) if alg == 7 else arr1
            gmask = np.bitwise_and(grhm, mask) if mask is not None else grhm
            #logger.debug(info_ndarr(arr1, '\n  arr1'))
            #logger.debug(info_ndarr(grhm, 'XXXX grhm'))
            #logger.debug(info_ndarr(gmask, 'XXXX gmask'))
            #logger.debug('common-mode mask massaging (sec) = %.6f' % (time()-t2_sec_cm)) # 5msec
            logger.debug(info_ndarr(gmask, 'gmask')\
                         + '\n  per panel statistics of cm-corrected pixels: %s' % str(np.sum(gmask, axis=(1,2), dtype=np.uint32)))

            #sh = (nsegs, 352, 384)
            hrows = 176  # int(352/2)
            # apply selected common-mode algorithms per segment
            for s in range(arrf.shape[0]):

                if mode & 4:  # in banks: (352/2,384/8)=(176,48) pixels
                    common_mode_2d_hsplit_nbanks(arrf[s, :hrows, :],
                                                 mask=gmask[s, :hrows, :],
                                                 nbanks=8,
                                                 cormax=cormax,
                                                 npix_min=npixmin)
                    common_mode_2d_hsplit_nbanks(arrf[s, hrows:, :],
                                                 mask=gmask[s, hrows:, :],
                                                 nbanks=8,
                                                 cormax=cormax,
                                                 npix_min=npixmin)

                if mode & 1:  # in rows per bank: 384/8 = 48 pixels # 190ms
                    common_mode_rows_hsplit_nbanks(arrf[s, ],
                                                   mask=gmask[s, ],
                                                   nbanks=8,
                                                   cormax=cormax,
                                                   npix_min=npixmin)

                if mode & 2:  # in cols per bank: 352/2 = 176 pixels # 150ms
                    common_mode_cols(arrf[s, :hrows, :],
                                     mask=gmask[s, :hrows, :],
                                     cormax=cormax,
                                     npix_min=npixmin)
                    common_mode_cols(arrf[s, hrows:, :],
                                     mask=gmask[s, hrows:, :],
                                     cormax=cormax,
                                     npix_min=npixmin)

            logger.debug('TIME common-mode correction = %.6f sec for cmp=%s' %
                         (time() - t0_sec_cm, str(_cmpars)))

    return arrf * factor if mask is None else arrf * factor * mask  # gain correction
Beispiel #12
0
def cbits_config_epixhr2x2(cob, shape=(288, 384)):
    """Creates array of the segment control bits for epixhr2x2 shape=(288, 384)
    from cob=det.raw._seg_configs()[<seg-ind>].config object.
    Returns per panel 4-bit pixel config array with bit assignment:
          0001 = 1<<0 = 1 - T test bit
          0010 = 1<<1 = 2 - M mask bit
          0100 = 1<<2 = 4 - g  gain bit
          1000 = 1<<3 = 8 - ga gain bit
          # add trbit
          010000 = 1<<4 = 16 - trbit

    Parameters
    ----------
    cob : container.Container object
        segment configuration object det.raw._seg_configs()[<seg-ind>].config
        Contains:
        cob.asicPixelConfig shape:(110592,) size:110592 dtype:uint8 [0 0 0 0 0...]
        cob.trbit: [1 1 1 1]

    ASIC map of epixhr2x2 (Matt)
      A1 | A3
     ----+----
      A0 | A2

    Returns
    -------
    xxxx: np.array, dtype:uint8, ndim=2, shape=(288, 384)
    """
    #t0_sec = time()
    trbits = cob.trbit  # [1 1 1 1] - per-ASIC trbit values
    pca = cob.asicPixelConfig  # shape:(110592,) - flat per-pixel config
    # half-panel sizes - ASIC quadrant boundaries within the panel array
    rowsh, colsh = int(shape[0] / 2), int(
        shape[1] / 2)  # should be 144, 192 for epixhr2x2
    logger.debug(
        info_ndarr(
            cob.asicPixelConfig, 'shape: %s trbits: %s asicPixelConfig:' %
            (str(shape), str(trbits))))

    cbits = np.bitwise_and(
        pca, 12, out=None)  # copy and mask non-essential bits 0o14 (bin:1100)
    cbits.shape = shape  # reshape flat array to the 2-d panel shape in place

    #logger.info('TIME1 in cbits_config_epixhr2x2 = %.6f sec' % (time()-t0_sec)) # 0.000206 sec

    if all(trbits):
        cbits = np.bitwise_or(cbits,
                              B04)  # add trbit for all pixels (288, 384)
    elif not any(trbits):
        return cbits
    else:  # set trbit per ASIC
        # quadrant mapping per the ASIC map in the docstring:
        # A1 -> rows[:rowsh], cols[:colsh];  A0 -> rows[rowsh:], cols[:colsh]
        # A3 -> rows[:rowsh], cols[colsh:];  A2 -> rows[rowsh:], cols[colsh:]
        if trbits[1]:
            np.bitwise_or(cbits[:rowsh, :colsh],
                          B04,
                          out=cbits[:rowsh, :colsh])
        if trbits[0]:
            np.bitwise_or(cbits[rowsh:, :colsh],
                          B04,
                          out=cbits[rowsh:, :colsh])
        if trbits[3]:
            np.bitwise_or(cbits[:rowsh, colsh:],
                          B04,
                          out=cbits[:rowsh, colsh:])
        if trbits[2]:
            np.bitwise_or(cbits[rowsh:, colsh:],
                          B04,
                          out=cbits[rowsh:, colsh:])

    #logger.info('TIME2 in cbits_config_epixhr2x2 = %.6f sec' % (time()-t0_sec))
    return cbits
Beispiel #13
0
def issue_2022_01_26():
    """The same as issue_2022_01_21 but for run 10, print ndarray, access constants.
    """
    from psana.detector.NDArrUtils import info_ndarr
    from psana import DataSource

    ds = DataSource(exp='tmoc00318', run=10)
    orun = next(ds.runs())
    det = orun.Detector('epix100')

    print('dir(det.raw):', dir(det.raw))
    print()

    # dump all calibration constants accessible via det.raw
    for meth, label in (
            (det.raw._pedestals,        'det.raw._pedestals()  '),
            (det.raw._gain,             'det.raw._gain()'),
            (det.raw._rms,              'det.raw._rms()'),
            (det.raw._status,           'det.raw._status()'),
            (det.raw._mask_calib,       'det.raw._mask_calib()'),
            (det.raw._mask_from_status, 'det.raw._mask_from_status()'),
            (det.raw._mask_edges,       'det.raw._mask_edges()'),
            (det.raw._common_mode,      'det.raw._common_mode()')):
        print(info_ndarr(meth(), label))
    print(info_ndarr(det.raw._pixel_coords(do_tilt=True, cframe=0), 'det.raw._pixel_coords(...)'))

    print()

    for nevt, evt in enumerate(orun.events()):
        if nevt > 10:
            print('event loop is terminated by maximal number of events')
            break
        print(info_ndarr(det.raw.raw(evt),   'det.raw.raw(evt)  '))
        print(info_ndarr(det.raw.calib(evt), 'det.raw.calib(evt)'))
Beispiel #14
0
def merge_panel_gain_ranges(dir_ctype, panel_id, ctype, tstamp, shape, dtype, ofname, fmt='%.3f', fac_mode=0o777):
    """Merges per-gain-range panel constants of calibration type *ctype* into
    a single array of shape (7, 1) + *shape*, saves it in text file *ofname*
    and returns it.

    Parameters
    - dir_ctype (str) - directory with per-gain-range constant files
    - panel_id (str) - panel identifier, used for logging only
    - ctype (str) - calibration type, e.g. 'gain', 'gainci', 'rms', 'status'
    - tstamp - timestamp used to select constant files
    - shape (tuple) - per-panel array shape, e.g. (352, 384)
    - dtype - numpy dtype of the merged array
    - ofname (str) - output text file name
    - fmt (str) - per-value text format for the saved file
    - fac_mode (int) - file access mode for the saved file

    Returns np.ndarray with shape (7, 1) + shape, one slice per gain mode.
    Missing files are replaced by defaults (ones scaled by GAIN_FACTOR_DEF
    for gain types, ones for 'rms', zeros otherwise).
    """

    logger.debug('In merge_panel_gain_ranges for\n  dir_ctype: %s\n  id: %s\n  ctype=%s tstamp=%s shape=%s dtype=%s fmt=%s'%\
                 (dir_ctype, panel_id, ctype, str(tstamp), str(shape), str(dtype), str(fmt)))

    #dtype = np.uint64 if ctype in ('status', ) else np.float32
    # default array used when no file is found for a gain mode
    nda_def = np.ones(shape, dtype=dtype) if ctype in ('gain', 'gainci', 'rms') else\
              np.zeros(shape, dtype=dtype)

    lstnda = []
    for igm,gm in enumerate(ue.GAIN_MODES):
        # 'status' and 'rms' have no dedicated files for the last two (low) gain modes
        fname = None if gm in ue.GAIN_MODES[5:] and ctype in ('status', 'rms') else\
                find_file_for_timestamp(dir_ctype, '%s_%s' % (ctype,gm), tstamp)
        nda = np.loadtxt(fname, dtype=dtype) if fname is not None else\
              nda_def*GAIN_FACTOR_DEF[igm] if ctype in ('gain', 'gainci') else\
              nda_def

        # normalize gains for ctype 'gainci'
        if fname is not None and ctype == 'gainci':
            med_nda = np.median(nda)
            dir_gain = dir_ctype
            if med_nda != 0:
                f_adu_to_kev = 0

                if gm in GAIN_MODES_IN: # 'FH','FM','FL','AHL-H','AML-M' # 'AHL-L','AML-L'
                    # scale so that the median equals the default gain factor
                    f_adu_to_kev = GAIN_FACTOR_DEF[igm] / med_nda
                    nda = nda * f_adu_to_kev

                elif gm=='AHL-L':
                    # low-gain scale derived from the AHL-H constants' median
                    #gain_hl_l = load_panel_constants(dir_gain, 'gainci_AHL-L', tstamp)
                    gain_hl_h = load_panel_constants(dir_gain, 'gainci_AHL-H', tstamp)
                    if gain_hl_h is None: continue
                    med_hl_h = np.median(gain_hl_h)
                    #V1
                    #ratio_lh = med_nda/med_hl_h if med_hl_h>0 else 0
                    #f_adu_to_kev = ratio_lh * GAIN_FACTOR_DEF[3] / med_nda
                    f_adu_to_kev = GAIN_FACTOR_DEF[3] / med_hl_h if med_hl_h>0 else 0
                    nda *= f_adu_to_kev
                    #V2
                    #nda = GAIN_FACTOR_DEF[3] * divide_protected(nda, gain_hl_h)

                elif gm=='AML-L':
                    # low-gain scale derived from the AML-M constants' median
                    #gain_ml_l = load_panel_constants(dir_gain, 'gainci_AML-L', tstamp)
                    gain_ml_m = load_panel_constants(dir_gain, 'gainci_AML-M', tstamp)
                    if gain_ml_m is None: continue
                    med_ml_m = np.median(gain_ml_m)
                    #V1
                    #ratio_lm = med_nda/med_ml_m if med_ml_m>0 else 0
                    #f_adu_to_kev = ratio_lm * GAIN_FACTOR_DEF[4] / med_nda
                    f_adu_to_kev = GAIN_FACTOR_DEF[4] / med_ml_m if med_ml_m>0 else 0
                    nda *= f_adu_to_kev
                    #V2
                    #nda = GAIN_FACTOR_DEF[4] * divide_protected(nda, gain_ml_m)

                    #logger.info('XXXX gm',gm)
                    #logger.info('XXXX med_nda',med_nda)
                    #logger.info('XXXX med_ml_m',med_ml_m)
                    #logger.info('XXXX GAIN_FACTOR_DEF[4]',GAIN_FACTOR_DEF[4])
                    #logger.info('XXXX ratio_lh',ratio_lh)
                    #logger.info('XXXX f_adu_to_kev',f_adu_to_kev)

        lstnda.append(nda if nda is not None else nda_def)
        #logger.debug(info_ndarr(nda, 'nda for %s' % gm))
        #logger.info('%5s : %s' % (gm,fname))

    logger.debug('merge per-gain-range data in segment nda:\n'+'\n'.join([info_ndarr(a,'    ') for a in lstnda]))

    nda = np.stack(tuple(lstnda))
    logger.debug('merge_panel_gain_ranges - merged with shape %s' % str(nda.shape))

    shape_merged = (7, 1) + shape # (7, 1, 352, 384)
    nda.shape = shape_merged
    logger.debug(info_ndarr(nda, 'merged %s'%ctype))
    save_ndarray_in_textfile(nda, ofname, fac_mode, fmt)

    nda.shape = shape_merged # (7, 1, 352, 384) because save_ndarray_in_textfile changes shape
    return nda
Beispiel #15
0
def proc_dark_block(block, **kwa):
    """DEPRECATED HERE - USE proc_block FROM UtilsCalib.py
       Returns per-panel (352, 384) arrays of mean, rms, ...
       block.shape = (nrecs, 352, 384), where nrecs <= 1024

       Two-stage evaluation:
       1) median/quantiles of (randomized) intensities define per-pixel gate limits;
       2) gated accumulation of event sums evaluates average, rms and bad-pixel status.

       Returns tuple of three (ny, nx) arrays:
       arr_av1 - gated per-pixel average (pedestal estimator); pixels where it deviates
                 from the median by more than the spectral width are replaced by the median
       arr_rms - gated per-pixel rms
       arr_sta - per-pixel bad-pixel status bits 1/2/4/8/16/32 (see log message in the code)
    """
    exp        = kwa.get('exp', None)
    detname    = kwa.get('det', None)
    int_lo     = kwa.get('int_lo', 1)       # lowest  intensity accepted for dark evaluation
    int_hi     = kwa.get('int_hi', 16000)   # highest intensity accepted for dark evaluation
    intnlo     = kwa.get('intnlo', 6.0)     # intensity distribution number-of-sigmas low
    intnhi     = kwa.get('intnhi', 6.0)     # intensity distribution number-of-sigmas high
    rms_lo     = kwa.get('rms_lo', 0.001)   # rms distribution low
    rms_hi     = kwa.get('rms_hi', 16000)   # rms distribution high
    rmsnlo     = kwa.get('rmsnlo', 6.0)     # rms distribution number-of-sigmas low
    rmsnhi     = kwa.get('rmsnhi', 6.0)     # rms distribution number-of-sigmas high
    fraclm     = kwa.get('fraclm', 0.1)     # allowed fraction limit
    fraclo     = kwa.get('fraclo', 0.05)    # fraction of statistics below low gate limit
    frachi     = kwa.get('frachi', 0.95)    # fraction of statistics below high gate limit
    frac05     = 0.5                        # median level - pedestal estimator
    nrecs1     = kwa.get('nrecs1', None)    # number of records for the 1st stage processing

    logger.debug('in proc_dark_block for exp=%s det=%s, block.shape=%s' % (exp, detname, str(block.shape)))
    logger.info(info_ndarr(block, 'Begin processing of the data block:\n    ', first=100, last=105))
    logger.debug('fraction of statistics for gate limits low: %.3f high: %.3f' % (fraclo, frachi))

    t0_sec = time()

    nrecs, ny, nx = block.shape
    shape = (ny, nx)
    # cap the 1st-stage sample size by the number of available records
    if nrecs1 is None or nrecs1>nrecs: nrecs1 = nrecs

    # unit arrays: uint16 used to build gate limits, uint64 used to count selected events
    arr1_u16 = np.ones(shape, dtype=np.uint16)
    arr1     = np.ones(shape, dtype=np.uint64)

    t1_sec = time()

    """
    NOTE:
    - our data is uint16.
    - np.median(block, axis=0) or np.quantile(...,interpolation='linear') return result rounded to int
    - in order to return interpolated float values apply the trick:
      data_block + random [0,1)-0.5
    - this would distort data in the range [-0.5,+0.5) ADU, but would allow to get better interpolation for median and quantile values
    - use nrecs1 (< nrecs) due to memory and time consumption
    """
    #blockf64 = np.random.random(block.shape) - 0.5 + block
    blockf64 = np.random.random((nrecs1, ny, nx)) - 0.5 + block[:nrecs1,:]
    logger.debug(info_ndarr(blockf64, '1-st stage conversion uint16 to float64, add random [0,1)-0.5 time = %.3f sec '%(time()-t1_sec), first=100, last=105))

    t1_sec = time()
    #arr_med = np.median(block, axis=0)
    # NOTE(review): 'interpolation' kwarg was renamed to 'method' in numpy 1.22 - confirm the numpy version pin
    arr_med = np.quantile(blockf64, frac05, axis=0, interpolation='linear')
    arr_qlo = np.quantile(blockf64, fraclo, axis=0, interpolation='linear')
    arr_qhi = np.quantile(blockf64, frachi, axis=0, interpolation='linear')
    logger.debug('block array median/quantile(0.5) for med, qlo, qhi time = %.3f sec' % (time()-t1_sec))

    # scalar medians over all pixels - used for logging only
    med_med = np.median(arr_med)
    med_qlo = np.median(arr_qlo)
    med_qhi = np.median(arr_qhi)

    # median absolute deviation from the per-pixel median - spectral peak width estimator
    arr_dev_3d = block[:,] - arr_med # .astype(dtype=np.float64)
    arr_abs_dev = np.median(np.abs(arr_dev_3d), axis=0)
    med_abs_dev = np.median(arr_abs_dev)

    logger.info(info_ndarr(arr_med,     '    arr_med[100:105] ', first=100, last=105))
    logger.info(info_ndarr(arr_qlo,     '    arr_qlo[100:105] ', first=100, last=105))
    logger.info(info_ndarr(arr_qhi,     '    arr_qhi[100:105] ', first=100, last=105))
    logger.info(info_ndarr(arr_abs_dev, '    abs_dev[100:105] ', first=100, last=105))

    s = 'Pre-processing time %.3f sec' % (time()-t0_sec)\
      + '\nResults for median over pixels intensities:'\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - pedestal estimator' % (frac05, med_med)\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - gate low limit' % (fraclo, med_qlo)\
      + '\n    %.3f fraction of the event spectrum is below %.3f ADU - gate upper limit' % (frachi, med_qhi)\
      + '\n    event spectrum spread    median(abs(raw-med)): %.3f ADU - spectral peak width estimator' % med_abs_dev
    logger.info(s)

    #sys.exit('TEST EXIT')

    logger.debug(info_ndarr(arr_med, '1st iteration proc time = %.3f sec arr_av1' % (time()-t0_sec)))
    #gate_half = nsigma*rms_ave
    #logger.debug('set gate_half=%.3f for intensity gated average, which is %.3f * sigma' % (gate_half,nsigma))
    #gate_half = nsigma*abs_dev_med
    #logger.debug('set gate_half=%.3f for intensity gated average, which is %.3f * abs_dev_med' % (gate_half,nsigma))

    # 2nd loop over recs in block to evaluate gated parameters
    logger.debug('Begin 2nd iteration')

    # per-pixel counters of events below int_lo / above int_hi
    sta_int_lo = np.zeros(shape, dtype=np.uint64)
    sta_int_hi = np.zeros(shape, dtype=np.uint64)

    # per-pixel extrema accumulators
    # 0x3fff = 16383: presumably the max 14-bit ADC code - TODO confirm detector data width
    arr_max = np.zeros(shape, dtype=block.dtype)
    arr_min = np.ones (shape, dtype=block.dtype) * 0x3fff

    gate_lo    = arr1_u16 * int_lo
    gate_hi    = arr1_u16 * int_hi

    #gate_hi = np.minimum(arr_av1 + gate_half, gate_hi).astype(dtype=block.dtype)
    #gate_lo = np.maximum(arr_av1 - gate_half, gate_lo).astype(dtype=block.dtype)
    # constrain per-pixel gates by the 1st-stage quantiles
    gate_lo = np.maximum(arr_qlo, gate_lo).astype(dtype=block.dtype)
    gate_hi = np.minimum(arr_qhi, gate_hi).astype(dtype=block.dtype)
    # guarantee a non-empty gate: open it by 1 ADU where hi <= lo
    cond = gate_hi>gate_lo
    gate_hi[np.logical_not(cond)] +=1
    #gate_hi = np.select((cond, np.logical_not(cond)), (gate_hi, gate_hi+1), 0)

    logger.debug(info_ndarr(gate_lo, '    gate_lo '))
    logger.debug(info_ndarr(gate_hi, '    gate_hi '))

    # per-pixel sums of 0th/1st/2nd powers of in-gate intensities
    arr_sum0 = np.zeros(shape, dtype=np.uint64)
    arr_sum1 = np.zeros(shape, dtype=np.float64)
    arr_sum2 = np.zeros(shape, dtype=np.float64)

    #blockdbl = np.array(block, dtype=np.float64)

    for nrec in range(nrecs):
        raw    = block[nrec,:]
        rawdbl = raw.astype(dtype=np.uint64) # blockdbl[nrec,:]

        logger.debug('nrec:%03d median(raw-ave): %f' % (nrec, np.median(raw.astype(dtype=np.float64) - arr_med)))
        #logger.debug('nrec:%03d median(raw-ave): %.6f' % (nrec, np.median(raw.astype(dtype=np.float64) - arr_med)))
        #logger.debug(info_ndarr(raw, '  raw     '))
        #logger.debug(info_ndarr(arr_med, '  arr_med '))

        # in-gate pixel selection: gate_lo <= raw <= gate_hi
        condlist = (np.logical_not(np.logical_or(raw<gate_lo, raw>gate_hi)),)

        arr_sum0 += np.select(condlist, (arr1,), 0)
        arr_sum1 += np.select(condlist, (rawdbl,), 0)
        arr_sum2 += np.select(condlist, (np.square(rawdbl),), 0)

        sta_int_lo += np.select((raw<int_lo,), (arr1,), 0)
        sta_int_hi += np.select((raw>int_hi,), (arr1,), 0)

        arr_max = np.maximum(arr_max, raw)
        arr_min = np.minimum(arr_min, raw)

    # gated per-pixel average and mean of squares (0-protected division)
    arr_av1 = divide_protected(arr_sum1, arr_sum0)
    arr_av2 = divide_protected(arr_sum2, arr_sum0)

    # per-pixel fractions of events with intensity outside [int_lo, int_hi]
    frac_int_lo = np.array(sta_int_lo/nrecs, dtype=np.float32)
    frac_int_hi = np.array(sta_int_hi/nrecs, dtype=np.float32)

    arr_rms = np.sqrt(arr_av2 - np.square(arr_av1))
    #rms_ave = arr_rms.mean()
    rms_ave = mean_constrained(arr_rms, rms_lo, rms_hi)

    # n-sigma limits for the rms and average distributions
    rms_min, rms_max = evaluate_limits(arr_rms, rmsnlo, rmsnhi, rms_lo, rms_hi, cmt='RMS')
    ave_min, ave_max = evaluate_limits(arr_av1, intnlo, intnhi, int_lo, int_hi, cmt='AVE')

    # per-pixel bad-status flags (1 where condition holds, 0 otherwise)
    arr_sta_rms_hi = np.select((arr_rms>rms_max,),    (arr1,), 0)
    arr_sta_rms_lo = np.select((arr_rms<rms_min,),    (arr1,), 0)
    arr_sta_int_hi = np.select((frac_int_hi>fraclm,), (arr1,), 0)
    arr_sta_int_lo = np.select((frac_int_lo>fraclm,), (arr1,), 0)
    arr_sta_ave_hi = np.select((arr_av1>ave_max,),    (arr1,), 0)
    arr_sta_ave_lo = np.select((arr_av1<ave_min,),    (arr1,), 0)

    logger.info('Bad pixel status:'\
               +'\n  status  1: %8d pixel rms       > %.3f' % (arr_sta_rms_hi.sum(), rms_max)\
               +'\n  status  2: %8d pixel rms       < %.3f' % (arr_sta_rms_lo.sum(), rms_min)\
               +'\n  status  4: %8d pixel intensity > %g in more than %g fraction of events' % (arr_sta_int_hi.sum(), int_hi, fraclm)\
               +'\n  status  8: %8d pixel intensity < %g in more than %g fraction of events' % (arr_sta_int_lo.sum(), int_lo, fraclm)\
               +'\n  status 16: %8d pixel average   > %g'   % (arr_sta_ave_hi.sum(), ave_max)\
               +'\n  status 32: %8d pixel average   < %g'   % (arr_sta_ave_lo.sum(), ave_min)\
               )

    #0/1/2/4/8/16/32 for good/hot-rms/cold-rms/saturated/cold/average above limit/average below limit,
    arr_sta = np.zeros(shape, dtype=np.uint64)
    arr_sta += arr_sta_rms_hi    # hot rms
    arr_sta += arr_sta_rms_lo*2  # cold rms
    arr_sta += arr_sta_int_hi*4  # saturated
    arr_sta += arr_sta_int_lo*8  # cold
    arr_sta += arr_sta_ave_hi*16 # too large average
    arr_sta += arr_sta_ave_lo*32 # too small average

    # where the gated average deviates from the median by more than the spectral
    # width estimator, trust the median instead of the average
    absdiff_av1_med = np.abs(arr_av1-arr_med)
    logger.debug(info_ndarr(absdiff_av1_med, 'np.abs(arr_av1-arr_med)', first=100, last=105))
    logger.info('estimator of difference between gated average and median np.median(np.abs(arr_av1-arr_med)): %.3f' % np.median(absdiff_av1_med))

    cond = absdiff_av1_med > med_abs_dev
    arr_av1[cond] = arr_med[cond]

    arr_sta_bad = np.select((cond,), (arr1,), 0)
    frac_bad = arr_sta_bad.sum()/float(arr_av1.size)
    logger.debug('fraction of panel pixels with gated average deviated from and replaced by median: %.6f' % frac_bad)

    logger.info('data block processing time = %.3f sec' % (time()-t0_sec))
    logger.debug(info_ndarr(arr_av1, 'arr_av1     [100:105] ', first=100, last=105))
    logger.debug(info_ndarr(arr_rms, 'pixel_rms   [100:105] ', first=100, last=105))
    logger.debug(info_ndarr(arr_sta, 'pixel_status[100:105] ', first=100, last=105))
    logger.debug(info_ndarr(arr_med, 'arr mediane [100:105] ', first=100, last=105))

    return arr_av1, arr_rms, arr_sta
Beispiel #16
0
def deploy_constants(dic_consts, **kwa):
    """Saves calibration constants from dic_consts in repository text files and,
       if requested, deploys them to the calibration DB.

       Parameters
       ----------
       dic_consts : dict {ctype(str): nda(np.ndarray)} - calibration constants per type,
                    e.g. 'pedestals', 'pixel_rms', 'pixel_status'
       **kwa : exp, det, dettype, deploy, dirrepo, dirmode, filemode, tstamp, tsshort,
               run_orig, fmt_peds, fmt_rms, fmt_status, longname (required when deploy=True), ...
               kwa (minus 'exp' and 'det') is also passed to the DB as document metadata.
    """
    from psana.pscalib.calib.MDBUtils import data_from_file
    from psana.pscalib.calib.MDBWebUtils import add_data_and_two_docs

    CTYPE_DTYPE = cc.dic_calib_name_to_dtype  # {'pedestals': np.float32,...}

    expname = kwa.get('exp', None)
    detname = kwa.get('det', None)
    dettype = kwa.get('dettype', None)
    deploy = kwa.get('deploy', False)
    dirrepo = kwa.get('dirrepo', './work')
    dirmode = kwa.get('dirmode', 0o777)
    filemode = kwa.get('filemode', 0o666)
    tstamp = kwa.get('tstamp', '2010-01-01T00:00:00')
    tsshort = kwa.get('tsshort', '20100101000000')
    runnum = kwa.get('run_orig', None)

    fmt_peds = kwa.get('fmt_peds', '%.3f')
    fmt_rms = kwa.get('fmt_rms', '%.3f')
    fmt_status = kwa.get('fmt_status', '%4i')

    # per-ctype text formats for save_ndarray_in_textfile; '%.5f' for any other ctype
    CTYPE_FMT = {
        'pedestals': fmt_peds,
        'pixel_rms': fmt_rms,
        'pixel_status': fmt_status
    }

    repoman = RepoManager(dirrepo,
                          dirmode=dirmode,
                          filemode=filemode,
                          dettype=dettype)
    dircons = repoman.makedir_constants(dname='constants')
    fprefix = fname_prefix(detname, tsshort, expname, runnum, dircons)

    # loop-invariant metadata preparation (was needlessly repeated per ctype):
    # remove 'exp'/'det' from kwargs - they are passed to add_data_and_two_docs
    # as positional arguments
    kwa['extpars'] = {
        'content': 'extended parameters dict->json->str',
    }
    #kwa['extpars'] = {'content':'other script parameters', 'script_parameters':kwa}
    _ = kwa.pop('exp', None)
    _ = kwa.pop('det', None)

    for ctype, nda in dic_consts.items():
        fname = '%s-%s.txt' % (fprefix, ctype)
        fmt = CTYPE_FMT.get(ctype, '%.5f')
        save_ndarray_in_textfile(nda, fname, filemode, fmt)

        # per-ctype metadata for the DB documents
        kwa['iofname'] = fname
        kwa['ctype'] = ctype
        kwa['dtype'] = 'ndarray'

        logger.info('DEPLOY metadata: %s' %
                    info_dict(kwa, fmt='%s: %s', sep='  '))  #fmt='%12s: %s'

        # re-load the just-saved file to verify the content which goes to the DB
        data = data_from_file(fname, ctype, 'ndarray', True)
        logger.info(info_ndarr(data, 'constants loaded from file', last=10))

        if deploy:
            longname = kwa['longname']  # full detector name; do not clobber detname
            resp = add_data_and_two_docs(
                data, expname, longname,
                **kwa)  # url=cc.URL_KRB, krbheaders=cc.KRBHEADERS
            if resp:
                # resp = (id_data_exp, id_data_det, id_doc_exp, id_doc_det)
                logger.debug(
                    'deployment id_data_exp:%s id_data_det:%s id_doc_exp:%s id_doc_det:%s'
                    % resp)
            else:
                logger.info('constants are not deployed')
                sys.exit()
        else:
            logger.warning('TO DEPLOY CONSTANTS ADD OPTION -D')
Beispiel #17
0
def test_calib_constants():
    """Builds a CalibConstants object from the test dictionary and prints
       a summary of each of its calibration arrays and geometry products.
    """
    consts = CalibConstants(dict_calib_constants())

    # per-ctype calibration arrays
    for nda, label in ((consts.pedestals(),   'pedestals'),
                       (consts.rms(),         'rms'),
                       (consts.status(),      'status'),
                       (consts.mask_calib(),  'mask_calib'),
                       (consts.common_mode(), 'common_mode'),
                       (consts.gain(),        'gain'),
                       (consts.gain_factor(), 'gain_factor')):
        print(info_ndarr(nda, label))

    print('geotxt_and_meta', consts.geotxt_and_meta())

    # pixel coordinate indexes and coordinates from geometry
    ix, iy = consts.pixel_coord_indexes()
    print(info_ndarr(ix, 'ix:'))
    print(info_ndarr(iy, 'iy:'))

    x, y, z = consts.pixel_coords()
    for coord, label in ((x, 'x:'), (y, 'y:'), (z, 'z:')):
        print(info_ndarr(coord, label))

    print('shape_as_daq:', consts.shape_as_daq())
    print('number_of_segments_total:', consts.number_of_segments_total())
Beispiel #18
0
def test_mask_select(tname, det):
    """Returns a mask array selected by the test number tname; exercises mask constituents.

       Test groups:
       '9'         - not a mask: random normal array shaped as the detector (display test)
       '1' - '7'   - masks built through the Mask object constructor parameters
       '11' - '17' - masks built through individual Mask object methods
       '21' - '27' - masks built through det.raw._mask_* methods
       Returns None for any unrecognized tname.
    """
    mask = None
    # random normal array with detector data shape - for display tests, not a real mask
    if tname == '9':
        import psana.pyalgos.generic.NDArrGenerators as ag
        sh = det.raw._shape_as_daq()  # (4, 352, 384)
        mask = ag.random_standard(shape=sh, mu=0, sigma=0.25, dtype=float)

    # mask from pixel status constants only
    elif tname == '1':
        mask = Mask(det, status=True, neighbors=False, edges=False, center=False, calib=False, umask=None)\
               .mask()

    # status mask extended by neighbor pixels
    elif tname == '2':
        mask = Mask(det,\
                    status=True, status_bits=0xffff, gain_range_inds=(0,1,2,3,4),\
                    neighbors=True, rad=5, ptrn='r',\
                    edges=False, \
                    center=False,\
                    calib=False,\
                    umask=None).mask()

    # mask of segment edges only
    elif tname == '3':
        mask = Mask(det,\
                    status=False,\
                    neighbors=False,\
                    edges=True, width=0, edge_rows=10, edge_cols=5,\
                    center=False,\
                    calib=False,\
                    umask=None).mask()

    # mask of central rows/columns only
    elif tname == '4':
        mask = Mask(det,\
                    status=False,\
                    neighbors=False,\
                    edges=False,\
                    center=True, wcenter=0, center_rows=5, center_cols=3,\
                    calib=False,\
                    umask=None,\
                    force_update=False).mask()

    # mask from calibration constants only
    elif tname == '5':
        mask = Mask(det,\
                    status=False,\
                    neighbors=False,\
                    edges=False,\
                    center=False,\
                    calib=True,\
                    umask=None).mask()

    # user-defined mask only
    elif tname == '6':
        mask = Mask(det,\
                    status=False,\
                    neighbors=False,\
                    edges=False,\
                    center=False,\
                    calib=False,\
                    umask=test_umask(det)).mask()

    # combination of all mask constituents through the constructor
    elif tname == '7':
        mask = Mask(det,\
                    status=True, status_bits=0xffff, gain_range_inds=(0,1,2,3,4),\
                    neighbors=True, rad=5, ptrn='r',\
                    edges=True, width=0, edge_rows=10, edge_cols=5,\
                    center=True, wcenter=0, center_rows=5, center_cols=3,\
                    calib=True,\
                    umask=test_umask(det),\
                    force_update=False).mask()

    # same constituents through individual Mask object methods
    elif tname == '11':
        mask = Mask(det).mask_from_status(
            status_bits=0xffff)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '12':
        o = Mask(det)
        msts = o.mask_from_status(
            status_bits=0xffff)  # dtype=DTYPE_MASK, **kwa)
        mask = o.mask_neighbors(msts, rad=9, ptrn='r')

    elif tname == '13':
        mask = Mask(det).mask_edges(width=0, edge_rows=10,
                                    edge_cols=5)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '14':
        mask = Mask(det).mask_center(wcenter=0, center_rows=5,
                                     center_cols=3)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '15':
        mask = Mask(det).mask_calib_or_default()  # dtype=DTYPE_MASK)

    elif tname == '16':
        mask = test_umask(det)

    # combination of all constituents through Mask.mask_comb
    elif tname == '17':
        mask = Mask(det).mask_comb(\
                    status=True, status_bits=0xffff, gain_range_inds=(0,1,2,3,4),\
                    neighbors=True, rad=5, ptrn='r',\
                    edges=True, width=0, edge_rows=10, edge_cols=5,\
                    center=True, wcenter=0, center_rows=5, center_cols=3,\
                    calib=True,\
                    umask=test_umask(det),\
                    force_update=False)  # dtype=DTYPE_MASK)

    # same constituents through det.raw._mask_* methods
    elif tname == '21':
        mask = det.raw._mask_from_status(
            status_bits=0xffff)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '22':
        msts = det.raw._mask_from_status(
            status_bits=0xffff)  # dtype=DTYPE_MASK, **kwa)
        mask = det.raw._mask_neighbors(msts, rad=9, ptrn='r')

    elif tname == '23':
        mask = det.raw._mask_edges(width=0, edge_rows=10,
                                   edge_cols=5)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '24':
        mask = det.raw._mask_center(wcenter=0, center_rows=5,
                                    center_cols=3)  # dtype=DTYPE_MASK, **kwa)

    elif tname == '25':
        mask = det.raw._mask_calib_or_default()  # dtype=DTYPE_MASK)

    elif tname == '26':
        mask = test_umask(det)

    # combination of all constituents through det.raw._mask_comb
    elif tname == '27':
        mask = det.raw._mask_comb(\
                    status=True, status_bits=0xffff, gain_range_inds=(0,1,2,3,4),\
                    neighbors=True, rad=5, ptrn='r',\
                    edges=True, width=0, edge_rows=10, edge_cols=5,\
                    center=True, wcenter=0, center_rows=5, center_cols=3,\
                    calib=True,\
                    umask=test_umask(det),\
                    force_update=False)  # dtype=DTYPE_MASK)

    logger.info(info_ndarr(mask, '\nmask'))
    return mask
Beispiel #19
0
    def summary(self):
        """Evaluates per-pixel summary arrays from sums accumulated over selected events.

           Sets attributes self.arr_av1 (gated average), self.arr_rms (gated rms),
           self.arr_sta (bad-pixel status bits), self.arr_msk (1-good/0-bad mask);
           logs bad-pixel statistics, optionally plots images, then resets the
           accumulation state (self.block=None, self.irec=-1).
        """
        t0_sec = time()

        logger.info('summary')
        logger.info('%s\nraw data found/selected in %d events' %
                    (80 * '_', self.irec + 1))

        if self.irec > 0:
            logger.info('begin data summary stage')
        else:
            # fewer than 2 records accumulated - rms is not evaluable, nothing to save
            logger.info('irec=%d there are no arrays to save...' % self.irec)
            return

        int_hi = self.int_hi
        int_lo = self.int_lo
        intnhi = self.intnhi
        intnlo = self.intnlo
        rms_hi = self.rms_hi
        rms_lo = self.rms_lo
        rmsnhi = self.rmsnhi
        rmsnlo = self.rmsnlo

        fraclm = self.fraclm
        counter = self.irec
        doplot = self.doplot

        # gated per-pixel average and mean of squares (0-protected division)
        arr_av1 = divide_protected(self.arr_sum1, self.arr_sum0)
        arr_av2 = divide_protected(self.arr_sum2, self.arr_sum0)

        # per-pixel fractions of events with intensity outside [int_lo, int_hi]
        frac_int_lo = np.array(self.sta_int_lo / counter, dtype=np.float32)
        frac_int_hi = np.array(self.sta_int_hi / counter, dtype=np.float32)

        arr_rms = np.sqrt(arr_av2 - np.square(arr_av1))

        logger.debug(info_ndarr(arr_rms, 'arr_rms'))
        logger.debug(info_ndarr(arr_av1, 'arr_av1'))

        # n-sigma limits for the rms and average distributions
        rms_min, rms_max = evaluate_limits(arr_rms,
                                           rmsnlo,
                                           rmsnhi,
                                           rms_lo,
                                           rms_hi,
                                           cmt='RMS')
        ave_min, ave_max = evaluate_limits(arr_av1,
                                           intnlo,
                                           intnhi,
                                           int_lo,
                                           int_hi,
                                           cmt='AVE')

        # per-pixel bad-status flags (1 where condition holds, 0 otherwise)
        arr_sta_rms_hi = np.select((arr_rms > rms_max, ), (self.arr1, ), 0)
        arr_sta_rms_lo = np.select((arr_rms < rms_min, ), (self.arr1, ), 0)
        arr_sta_int_hi = np.select((frac_int_hi > fraclm, ), (self.arr1, ), 0)
        arr_sta_int_lo = np.select((frac_int_lo > fraclm, ), (self.arr1, ), 0)
        arr_sta_ave_hi = np.select((arr_av1 > ave_max, ), (self.arr1, ), 0)
        arr_sta_ave_lo = np.select((arr_av1 < ave_min, ), (self.arr1, ), 0)

        logger.info('bad pixel status:'\
               +'\n  status  1: %8d pixel rms       > %.3f' % (arr_sta_rms_hi.sum(), rms_max)\
               +'\n  status  2: %8d pixel rms       < %.3f' % (arr_sta_rms_lo.sum(), rms_min)\
               +'\n  status  4: %8d pixel intensity > %g in more than %g fraction of events' % (arr_sta_int_hi.sum(), int_hi, fraclm)\
               +'\n  status  8: %8d pixel intensity < %g in more than %g fraction of events' % (arr_sta_int_lo.sum(), int_lo, fraclm)\
               +'\n  status 16: %8d pixel average   > %g'   % (arr_sta_ave_hi.sum(), ave_max)\
               +'\n  status 32: %8d pixel average   < %g'   % (arr_sta_ave_lo.sum(), ave_min)\
               )

        #0/1/2/4/8/16/32 for good/hot-rms/cold-rms/saturated/cold/average above limit/average below limit,
        arr_sta = np.zeros(arr_av1.shape, dtype=np.uint64)
        arr_sta += arr_sta_rms_hi  # hot rms
        arr_sta += arr_sta_rms_lo * 2  # cold rms
        arr_sta += arr_sta_int_hi * 4  # saturated
        arr_sta += arr_sta_int_lo * 8  # cold
        arr_sta += arr_sta_ave_hi * 16  # too large average
        arr_sta += arr_sta_ave_lo * 32  # too small average

        # mask: 1 for good pixels (status==0), 0 for any bad status bit
        arr_msk = np.select((arr_sta > 0, ), (self.arr0, ), 1)

        self.arr_av1 = arr_av1
        self.arr_rms = arr_rms
        self.arr_sta = arr_sta
        self.arr_msk = arr_msk  # fixed: was evaluated twice with the same expression

        logger.debug(self.info_results())
        if doplot: self.plot_images(titpref='')

        # reset accumulation state for the next cycle
        self.block = None
        self.irec = -1
        logger.info('summary consumes %.3f sec' % (time() - t0_sec))
Beispiel #20
0
def pedestals_calibration(*args, **kwa):
    """NEWS significant ACCELERATION is acheived:
       - accumulate data for entire epix10kam_2m/quad array
       - use MPI
       all-panel or selected-panel one-step (gain range) or all steps calibration of pedestals
    """
    fname      = kwa.get('fname', None)
    detname    = kwa.get('det', None)
    exp        = kwa.get('exp', None)
    runs       = kwa.get('runs', None)
    nrecs      = kwa.get('nrecs', 1000)
    stepnum    = kwa.get('stepnum', None)
    stepmax    = kwa.get('stepmax', 5)
    evskip     = kwa.get('evskip', 0)
    events     = kwa.get('events', 1000)
    dirxtc     = kwa.get('dirxtc', None)
    dirrepo    = kwa.get('dirrepo', CALIB_REPO_EPIX10KA)
    fmt_peds   = kwa.get('fmt_peds', '%.3f')
    fmt_rms    = kwa.get('fmt_rms',  '%.3f')
    fmt_status = kwa.get('fmt_status', '%4i')
    idx_sel    = kwa.get('idx', None)
    dirmode    = kwa.get('dirmode', 0o777)
    filemode   = kwa.get('filemode', 0o666)
    usesmd     = kwa.get('usesmd', False)
    logmode    = kwa.get('logmode', 'DEBUG')
    errskip    = kwa.get('errskip', False)

    logger.setLevel(DICT_NAME_TO_LEVEL[logmode])

    #irun = runs[0] if isinstance(runs, list) else\
    #       int(runs.split(',',1)[0].split('-',1)[0]) # int first run number from str of run(s)
    irun = irun_first(runs)

    #dsname = 'exp=%s:run=%s'%(exp,runs) if dirxtc is None else 'exp=%s:run=%s:dir=%s'%(exp, runs, dirxtc)
    #if usesmd: dsname += ':smd'

    #_name = sys._getframe().f_code.co_name
    _name = SCRNAME
    logger.info('In %s\n  exp: %s\n  runs: %s\n  detector: %s' % (_name, exp, str(runs), detname))
    save_log_record_at_start(dirrepo, _name, dirmode, filemode, logmode)

    #cpdic = get_config_info_for_dataset_detname(dsname, detname)
    #tstamp      = cpdic.get('tstamp', None)
    #panel_ids   = cpdic.get('panel_ids', None)
    #expnum      = cpdic.get('expnum', None)
    #dettype     = cpdic.get('dettype', None)
    #shape       = cpdic.get('shape', None)
    #ny,nx = shape

    #panel_id = get_panel_id(panel_ids, idx)
    #logger.debug('Found panel ids:\n%s' % ('\n'.join(panel_ids)))

    #read input xtc file and accumulate block of data

    #================= MPI

    #from mpi4py import MPI
    #comm = MPI.COMM_WORLD
    #rank = comm.Get_rank()
    #size = comm.Get_size() # number of MPI nodes; 1 for regular python command

    #=================

    kwa = data_source_kwargs(**kwa)
    #ds = DataSource(**kwa)
    try: ds = DataSource(**kwa)
    except Exception as err:
        logger.error('DataSource(**kwa) does not work:\n    %s' % err)
        sys.exit('EXIT - requested DataSource does not exist or is not accessible.')

    logger.debug('ds.runnum_list = %s' % str(ds.runnum_list))
    logger.debug('ds.detectors = %s' % str(ds.detectors))
    logger.info('ds.xtc_files:\n  %s' % ('\n  '.join(ds.xtc_files)))

    mode = None # gain_mode
    nstep_tot = -1

    #orun = next(ds.runs())
    for orun in ds.runs():
      logger.debug('==run.runnum   : %d' % orun.runnum)        # 27
      logger.debug('  run.detnames : %s' % str(orun.detnames)) # {'epixquad'}
      logger.debug('  run.expt     : %s', orun.expt)           # ueddaq02

      runtstamp = orun.timestamp    # 4193682596073796843 relative to 1990-01-01
      trun_sec = seconds(runtstamp) # 1607569818.532117 sec
      #tstamp = str_tstamp(time_sec=int(trun_sec)) #fmt='%Y-%m-%dT%H:%M:%S%z'

      tstamp_run, tstamp_now = tstamps_run_and_now(int(trun_sec))
      tstamp = tstamp_run

      logger.debug('  run.timestamp: %d' % orun.timestamp)
      logger.debug('  run unix epoch time %06f sec' % trun_sec)
      logger.debug('  run tstamp: %s' % tstamp_run)
      logger.debug('  now tstamp: %s' % tstamp_now)
      det = orun.Detector(detname)
      #step_value = orun.Detector('step_value')
      try: step_docstring = orun.Detector('step_docstring')
      except Exception as err:
        logger.error('run.Detector("step_docstring") does not work:\n    %s' % err)
        sys.exit('Exit processing due to missing info about dark data step.')
      #cd = orun.Detector('ControlData') #LCLS1

      logger.debug('--- det.raw._det_name: %s' % det.raw._det_name) # epixquad
      logger.debug('    det.raw._dettype : %s' % det.raw._dettype)  # epix
      logger.debug('    det.raw._calibconst.keys(): %s' % str(det.raw._calibconst.keys())) # dict_keys(['geometry'])
      #logger.debug('    det.raw._uniqueid: %s' % det.raw._uniqueid)
      #logger.debug('    det.raw._sorted_segment_ids: %s' % str(det.raw._sorted_segment_ids))
      #logger.debug('    det.raw._fullname: %s' % det.raw._fullname())

      segment_ids = det.raw._segment_ids() #ue.segment_ids_det(det)
      segment_inds = det.raw._segment_indices() #ue.segment_indices_det(det)
      s = 'segment inds and ids in the detector'
      for i,id in zip(segment_inds,segment_ids):
          s += '\n  seg:%02d id:%s' % (i,id)
      logger.info(s)

      BIT_MASK = det.raw._data_bit_mask
      logger.info('    det.raw._data_bit_mask BIT_MASK: %s' % oct(BIT_MASK))

      #logger.debug('    det.raw._segment_ids: %s' % str(det.raw._segment_ids()))
      #logger.debug('    det.raw._segment_indices: %s' % str(det.raw._segment_indices()))

      dcfg = det.raw._config_object() #ue.config_object_det(det)

      for nstep_run, step in enumerate(orun.steps()): #(loop through calyb cycles, using only the first):
        nstep_tot += 1
        logger.info('\n=============== step %2d ===============' % nstep_tot)
        logger.debug('    step.evt._seconds: %d' % step.evt._seconds)

        metadic = json.loads(step_docstring(step))
        nstep = step_counter(metadic, nstep_tot, nstep_run, stype='pedestal')

        if nstep is None: continue

        #if size > 1:
        #    # if MPI is on process all steps, step per rank
        #    if nstep < rank: continue
        #    if nstep > rank: break

        if nstep_tot>=stepmax:
            logger.info('==== Step:%02d loop is terminated, --stepmax=%d' % (nstep_tot, stepmax))
            break

        elif stepnum is not None:
            if   nstep < stepnum:
                logger.info('==== Step:%02d is skipped, --stepnum=%d' % (nstep, stepnum))
                continue
            elif nstep > stepnum:
                logger.info('==== Step:%02d loop is terminated, --stepnum=%d' % (nstep, stepnum))
                break

        #for k,v in det.raw._seg_configs().items(): # cpo's pattern DOES NOT WORK
        for k,v in dcfg.items():
            scob = v.config
            logger.info(info_ndarr(scob.asicPixelConfig, 'seg:%02d trbits: %s asicPixelConfig:'%(k, str(scob.trbit))))
            #logger.info(info_ndarr(scob.asicPixelConfig[:,:-2,:], 'seg:%02d trbits: %s asicPixelConfig:'%(k, str(scob.trbit))))

        gmaps = ue.gain_maps_epix10ka_any(det.raw, evt=None) #dcfg, data=None)
        logger.debug('gain mode statistics:' + ue.info_pixel_gain_mode_statistics(gmaps))
        logger.debug(ue.info_pixel_gain_mode_fractions(det.raw, evt=None, msg='gain mode fractions :'))

        logger.debug('gain maps'\
          + info_ndarr(gmaps[0],'\n    FH  ')\
          + info_ndarr(gmaps[1],'\n    FM  ')\
          + info_ndarr(gmaps[2],'\n    FL  ')\
          + info_ndarr(gmaps[3],'\n    AHL ')\
          + info_ndarr(gmaps[4],'\n    AML ')\
        )

        mode = ue.find_gain_mode(det.raw, evt=None).upper()   #dcfg, data=None).upper()

        if mode in ue.GAIN_MODES_IN:
            mode_in_step = ue.GAIN_MODES_IN[nstep]
            logger.info('== step %d: dark run processing for gain mode in configuration %s and step number %s'\
                        %(nstep, mode, mode_in_step))
            if mode != mode_in_step:
              logger.warning('INCONSISTENT GAIN MODES IN CONFIGURATION AND STEP NUMBER/METADATA')
              if not errskip: sys.exit()
              logger.warning('FLAG ERRSKIP IS %s - keep processing assuming gain mode %s' % (errskip,mode))
              #continue
        else:
            logger.warning('UNRECOGNIZED GAIN MODE: %s, DARKS NOT UPDATED...'%mode)
            sys.exit()
            #return

        sh = gmaps[0].shape
        shape_block = [nrecs,] + list(sh) # [nrecs, <number-of-segments>, 352, 384]
        logger.info('Accumulate raw frames in block shape = %s' % str(shape_block))

        block=np.zeros(shape_block,dtype=np.uint16)
        nrec,nevt = -1,0

        ss = None
        for nevt,evt in enumerate(step.events()):
            raw = det.raw.raw(evt)
            do_print = selected_record(nevt)
            if raw is None:
                logger.info('==== Ev:%04d rec:%04d raw is None' % (nevt,nrec))
                continue

            if nevt < evskip:
                logger.debug('==== Ev:%04d is skipped, --evskip=%d' % (nevt,evskip))
                continue
            elif evskip>0 and (nevt == evskip):
                s = 'Events < --evskip=%d are skipped' % evskip
                #print(s)
                logger.info(s)

            if nevt > events-1:
                logger.info(ss)
                logger.info('==== Ev:%04d event loop is terminated, --events=%d' % (nevt,events))
                print()
                break

            if nrec > nrecs-2:
                logger.info(ss)
                logger.info('==== Ev:%04d event loop is terminated - collected sufficient number of frames, --nrecs=%d' % (nevt,nrecs))
                break
            else:
                nrec += 1
                ss = info_ndarr(raw & BIT_MASK, 'Ev:%04d rec:%04d raw & BIT_MASK ' % (nevt,nrec))
                if do_print: logger.info(ss)
                block[nrec]=(raw & BIT_MASK)

        if nevt < events: logger.info('==== Ev:%04d end of events in run step %d' % (nevt,nstep_run))

        print_statistics(nevt, nrec)

        #---- process statistics in block-array for panels

        for idx, panel_id in zip(segment_inds,segment_ids):

            if idx_sel is not None and idx_sel != idx: continue # skip panels with inices other than idx_sel if specified

            logger.info('\n%s\nprocess panel:%02d id:%s' % (96*'=', idx, panel_id))

            #if mode is None:
            #    msg = 'Gain mode for dark processing is not defined "%s" try to set option -m <gain-mode>' % mode
            #    logger.warning(msg)
            #    sys.exit(msg)

            dir_panel, dir_offset, dir_peds, dir_plots, dir_work, dir_gain, dir_rms, dir_status = dir_names(dirrepo, panel_id)

            #print('XXXX panel_id, tstamp, exp, irun', panel_id, tstamp, exp, irun)

            fname_prefix, panel_alias = file_name_prefix(dirrepo, det.raw._dettype, panel_id, tstamp, exp, irun)
            logger.debug('\n  fname_prefix:%s\n  panel_alias :%s' % (fname_prefix, panel_alias))

            prefix_offset, prefix_peds, prefix_plots, prefix_gain, prefix_rms, prefix_status =\
                path_prefixes(fname_prefix, dir_offset, dir_peds, dir_plots, dir_gain, dir_rms, dir_status)

            #logger.debug('Directories under %s\n  SHOULD ALREADY EXIST after charge-injection offset_calibration' % dir_panel)
            #assert os.path.exists(dir_offset), 'Directory "%s" DOES NOT EXIST' % dir_offset
            #assert os.path.exists(dir_peds),   'Directory "%s" DOES NOT EXIST' % dir_peds

            create_directory(dir_panel,  mode=dirmode)
            create_directory(dir_peds,   mode=dirmode)
            create_directory(dir_offset, mode=dirmode)
            create_directory(dir_gain,   mode=dirmode)
            create_directory(dir_rms,    mode=dirmode)
            create_directory(dir_status, mode=dirmode)


            #dark=block[:nrec,:].mean(0)  #Calculate mean

            #block.sahpe = (1024, 16, 352, 384)
            dark, rms, status = proc_dark_block(block[:nrec,idx,:], **kwa) # process pedestals per-panel (352, 384)


            #continue # TEST
            #==========

            fname = '%s_pedestals_%s.dat' % (prefix_peds, mode)
            save_2darray_in_textfile(dark, fname, filemode, fmt_peds)

            fname = '%s_rms_%s.dat' % (prefix_rms, mode)
            save_2darray_in_textfile(rms, fname, filemode, fmt_rms)

            fname = '%s_status_%s.dat' % (prefix_status, mode)
            save_2darray_in_textfile(status, fname, filemode, fmt_status)

            #if this is an auto gain ranging mode, also calculate the corresponding _L pedestal:

            if mode=='AHL-H': # evaluate AHL_L from AHL_H
                ped_hl_h = dark #[3,:,:]

                offset_hl_h = load_panel_constants(dir_offset, 'offset_AHL-H', tstamp)
                offset_hl_l = load_panel_constants(dir_offset, 'offset_AHL-L', tstamp)
                gain_hl_h   = load_panel_constants(dir_gain,   'gainci_AHL-H', tstamp)
                gain_hl_l   = load_panel_constants(dir_gain,   'gainci_AHL-L', tstamp)

                #if offset is not None:
                if all([v is not None for v in (offset_hl_h, offset_hl_l, gain_hl_h, gain_hl_l)]):
                    ped_hl_l = offset_hl_l - (offset_hl_h - ped_hl_h) * divide_protected(gain_hl_l, gain_hl_h) #V3 Gabriel's
                    fname = '%s_pedestals_AHL-L.dat' % prefix_peds
                    save_2darray_in_textfile(ped_hl_l, fname, filemode, fmt_peds)

            elif mode=='AML-M': # evaluate AML_L from AML_M
                ped_ml_m = dark #[4,:,:]

                offset_ml_m = load_panel_constants(dir_offset, 'offset_AML-M', tstamp)
                offset_ml_l = load_panel_constants(dir_offset, 'offset_AML-L', tstamp)
                gain_ml_m   = load_panel_constants(dir_gain,   'gainci_AML-M', tstamp)
                gain_ml_l   = load_panel_constants(dir_gain,   'gainci_AML-L', tstamp)

                #if offset is not None:
                if all([v is not None for v in (offset_ml_m, offset_ml_l, gain_ml_m, gain_ml_l)]):
                    ped_ml_l = offset_ml_l - (offset_ml_m - ped_ml_m) * divide_protected(gain_ml_l, gain_ml_m) #V3 Gabriel's
                    fname = '%s_pedestals_AML-L.dat' % prefix_peds
                    save_2darray_in_textfile(ped_ml_l, fname, filemode, fmt_peds)
Beispiel #21
0
    def _cached_pixel_coord_indexes(self, evt, **kwa):
        """Evaluates and caches per-pixel row/col image indexes for the panel
        segments present in *evt*.

        Side effects (attributes set on self):
        - self._pix_rc_ : [rows, cols] index arrays restricted to segments in evt
        - self.img_entries, self.dmulti_pix_to_img_idx,
          self.dmulti_imgidx_numentries : pixel-array statistics (mapmode < 4)
        - self._pix_xyz_, self._interpol_pars_ : coordinates and interpolation
          parameters (mapmode == 4 only)
        - self.img_pix_ascend_ind, self.img_holes, self.hole_rows,
          self.hole_cols, self.hole_inds1d : hole statistics
          (mapmode < 4 and fillholes)

        Parameters
        ----------
        evt : event object, passed to self._segment_numbers to select segments
        **kwa : forwarded to self._pixel_coord_indexes / self._pixel_coords;
                consumed here: mapmode (int, default 2), fillholes (bool, default True)

        Returns
        -------
        None - both on failure (indexes or segment list unavailable) and after
        successful caching; callers must check the cached attributes, not the
        return value.
        """
        logger.debug('AreaDetector._cached_pixel_coord_indexes')

        resp = self._pixel_coord_indexes(**kwa)
        if resp is None: return None

        # PRESERVE PIXEL INDEXES FOR USED SEGMENTS ONLY
        segs = self._segment_numbers(evt)
        if segs is None: return None
        logger.debug(info_ndarr(segs, 'preserve pixel indices for segments '))

        # assumes reshape_to_3d yields (nsegs, rows, cols) so that fancy-indexing
        # by segs keeps only the segments present in this event -- TODO confirm
        rows, cols = self._pix_rc_ = [
            reshape_to_3d(a)[segs, :, :] for a in resp
        ]
        #self._pix_rc_ = [dict_from_arr3d(reshape_to_3d(v)) for v in resp]

        s = 'evaluate_pixel_coord_indexes:'
        for i, a in enumerate(self._pix_rc_):
            s += info_ndarr(a, '\n  %s ' % ('rows', 'cols')[i], last=3)
        logger.info(s)

        mapmode = kwa.get('mapmode', 2)
        if mapmode < 4:
            self.img_entries, self.dmulti_pix_to_img_idx, self.dmulti_imgidx_numentries=\
              statistics_of_pixel_arrays(rows, cols)

        # mapmode 4: interpolation mode - also needs physical x/y coordinates
        if mapmode == 4:
            rsp = self._pixel_coords(**kwa)
            if rsp is None: return None
            x, y, z = self._pix_xyz_ = [
                reshape_to_3d(a)[segs, :, :] for a in rsp
            ]
            self._interpol_pars_ = init_interpolation_parameters(
                rows, cols, x, y)

        if mapmode < 4 and kwa.get('fillholes', True):
            self.img_pix_ascend_ind, self.img_holes, self.hole_rows, self.hole_cols, self.hole_inds1d =\
               statistics_of_holes(rows, cols, **kwa)

        # TBD parameters for image interpolation
        # NOTE(review): dead code below (guarded by `if False`); it references
        # undefined names (img_pix_ascend_ind, arr_shape, imgind_to_seg_row_col
        # without self.) and would fail if ever enabled -- fix before enabling.
        if False:
            t0_sec = time()
            self.imgind_to_seg_row_col = image_of_pixel_seg_row_col(
                img_pix_ascend_ind, arr_shape)
            logger.debug(
                'statistics_of_holes.imgind_to_seg_row_col time (sec) = %.6f' %
                (time() - t0_sec))  # 47ms
            logger.debug(
                info_ndarr(self.imgind_to_seg_row_col,
                           ' imgind_to_seg_row_col '))

            if False:
                s = ' imgind_to_seg_row_col '
                # (n,352,384)
                first = (352 + 5) * 384 + 380
                for i in range(first, first + 10):
                    s += '\n    s:%02d r:%03d c:%03d' % tuple(
                        imgind_to_seg_row_col[i])
                logger.debug(s)
Beispiel #22
0
def deploy_constants(*args, **kwa):
    """Merges per-panel calibration constants from the repository into
    whole-detector arrays, saves them as text files, and optionally deploys
    them to the calibration database.

    Keyword arguments (selected)
    ----------------------------
    exp, det, runs : experiment name, detector name, run spec (first run used)
    tstamp   : int or None - YYYYmmddHHMMSS time stamp, or a run number if <10000,
               overriding validity start for deployment
    dirrepo  : calibration repository root (default CALIB_REPO_EPIX10KA)
    deploy   : bool - if True, push merged constants to the DB via MDBWebUtils
    proc     : str of flags selecting constant types to merge:
               'p'-pedestals, 'r'-rms, 's'-status, 'g'-gain, 'c'-gainci
    paninds  : comma-separated panel indexes to process, e.g. '0,1,2,3', or None for all
    high/medium/low : gain factors (ADU/keV); None falls back to detector defaults

    Side effects: creates directories/files in the repository, writes a log
    record, and (if deploy) inserts documents into the calibration DB.
    """

    from psana.pscalib.calib.NDArrIO import save_txt; global save_txt
    import psana.pscalib.calib.MDBUtils as mu
    import psana.pscalib.calib.MDBWebUtils as wu
    cc = wu.cc # import psana.pscalib.calib.CalibConstants as cc

    exp        = kwa.get('exp', None)
    detname    = kwa.get('det', None)
    runs       = kwa.get('runs', None)
    tstamp     = kwa.get('tstamp', None) # (int) time stamp in format YYYYmmddHHMMSS or run number(<10000)
    dirxtc     = kwa.get('dirxtc', None)
    dirrepo    = kwa.get('dirrepo', CALIB_REPO_EPIX10KA)
    deploy     = kwa.get('deploy', False)
    fmt_peds   = kwa.get('fmt_peds', '%.3f')
    fmt_gain   = kwa.get('fmt_gain', '%.6f')
    fmt_rms    = kwa.get('fmt_rms',  '%.3f')
    fmt_status = kwa.get('fmt_status', '%4i')
    logmode    = kwa.get('logmode', 'DEBUG')
    dirmode    = kwa.get('dirmode',  0o777)
    filemode   = kwa.get('filemode', 0o666)
    high       = kwa.get('high',   16.40) # ADU/keV
    medium     = kwa.get('medium', 5.466) # ADU/keV
    low        = kwa.get('low',    0.164) # ADU/keV
    proc       = kwa.get('proc', 'prsg')
    paninds    = kwa.get('paninds', None)
    version    = kwa.get('version', 'N/A')
    run_end    = kwa.get('run_end', 'end')
    comment    = kwa.get('comment', 'no comment')
    dbsuffix   = kwa.get('dbsuffix', '')

    logger.setLevel(DICT_NAME_TO_LEVEL[logmode])

    #dsname = 'exp=%s:run=%d'%(exp,irun) if dirxtc is None else 'exp=%s:run=%d:dir=%s'%(exp, irun, dirxtc)
    irun = irun_first(runs)  # first run number of the run spec defines constants origin
    #_name = sys._getframe().f_code.co_name
    _name = SCRNAME
    save_log_record_at_start(dirrepo, _name, dirmode, filemode, logmode)

    cpdic = get_config_info_for_dataset_detname(**kwa)
    tstamp_run  = cpdic.get('tstamp',    None) # str
    expnum      = cpdic.get('expnum',    None)
    shape       = cpdic.get('shape',     None)
    strsrc      = cpdic.get('strsrc',    None)
    panel_ids   = cpdic.get('panel_ids', None)
    panel_inds  = cpdic.get('panel_inds',None)
    dettype     = cpdic.get('dettype',   None)
    det_name    = cpdic.get('det_name',  None)
    longname    = cpdic.get('longname', detname)
    gains_def   = cpdic.get('gains_def', None)

    req_inds = None if paninds is None else [int(i) for i in paninds.split(',')] # conv str '0,1,2,3' to list [0,1,2,3]
    logger.info('In %s\n      detector: "%s" \n      requested_inds: %s' % (_name, detname, str(req_inds)))

    assert isinstance(gains_def, tuple)
    assert len(gains_def) == 3

    # fall back to detector-default gain factors when not given explicitly
    if high   is None: high   = gains_def[0]
    if medium is None: medium = gains_def[1]
    if low    is None: low    = gains_def[2]

    global GAIN_FACTOR_DEF
    #GAIN_MODES     = ['FH','FM','FL','AHL-H','AML-M','AHL-L','AML-L']
    GAIN_FACTOR_DEF = [high, medium, low, high, medium, low, low]

    # per-constant-type text output format
    CTYPE_FMT = {'pedestals'   : fmt_peds,
                 'pixel_gain'  : fmt_gain,
                 'pixel_rms'   : fmt_rms,
                 'pixel_status': fmt_status}

    CTYPE_DTYPE = cc.dic_calib_name_to_dtype # {'pedestals': np.float32,...}

    logger.debug('detector "%s" panel ids:\n  %s' % (detname, '\n  '.join(panel_ids)))

    # repository file lookup always uses the run-derived time stamp;
    # the user-supplied tstamp only affects deployment validity below
    #if tstamp is None: tstamp = tstamp_run
    _tstamp = tstamp_run

    logger.debug('search for calibration files with tstamp <= %s' % _tstamp)

    # dict_consts for constants octype: 'pixel_gain', 'pedestals', etc.
    dic_consts = {}
    for ind, panel_id in zip(panel_inds,panel_ids):

        if req_inds is not None and not (ind in req_inds): continue # skip non-selected panels

        logger.info('%s\nmerge constants for panel:%02d id: %s' % (98*'_', ind, panel_id))

        dir_panel, dir_offset, dir_peds, dir_plots, dir_work, dir_gain, dir_rms, dir_status = dir_names(dirrepo, panel_id)
        fname_prefix, panel_alias = file_name_prefix(dirrepo, dettype, panel_id, _tstamp, exp, irun)

        prefix_offset, prefix_peds, prefix_plots, prefix_gain, prefix_rms, prefix_status =\
            path_prefixes(fname_prefix, dir_offset, dir_peds, dir_plots, dir_gain, dir_rms, dir_status)

        #mpars = (('pedestals', 'pedestals',    prefix_peds,   dir_peds),\
        #         ('rms',       'pixel_rms',    prefix_rms,    dir_rms),\
        #         ('status',    'pixel_status', prefix_status, dir_status),\
        #         ('gain',      'pixel_gain',   prefix_gain,   dir_gain))

        # build (ctype, octype, prefix, dir) work list from proc flags
        mpars = []
        if 'p' in proc: mpars.append(('pedestals', 'pedestals',    prefix_peds,   dir_peds))
        if 'r' in proc: mpars.append(('rms',       'pixel_rms',    prefix_rms,    dir_rms))
        if 's' in proc: mpars.append(('status',    'pixel_status', prefix_status, dir_status))
        if 'g' in proc: mpars.append(('gain',      'pixel_gain',   prefix_gain,   dir_gain))
        if 'c' in proc: mpars.append(('gainci',    'pixel_gain',   prefix_gain,   dir_gain))
        if 'c' in proc:
             add_links_for_gainci_fixed_modes(dir_gain, fname_prefix) # FH->AHL-H, FM->AML-M, FL->AML-L/AHL-L

        for (ctype, octype, prefix, dir_ctype) in mpars:
            fmt = CTYPE_FMT.get(octype,'%.5f')
            nda_dtype = CTYPE_DTYPE.get(octype, np.float32)

            logger.debug('begin merging for ctype:%s, octype:%s, fmt:%s,\n  prefix:%s' % (ctype, octype, fmt, prefix))
            fname = '%s_%s.txt' % (prefix, ctype)
            nda = merge_panel_gain_ranges(dir_ctype, panel_id, ctype, _tstamp, shape, nda_dtype, fname, fmt, filemode)
            if octype in dic_consts: dic_consts[octype].append(nda) # append for panel per ctype
            else:                    dic_consts[octype] = [nda,]

    logger.info('\n%s\nMERGE PANEL CONSTANTS AND DEPLOY THEM\n' % (80*'_'))

    #if deploy:

    dmerge = dir_merge(dirrepo)
    create_directory(dmerge, mode=dirmode)
    fmerge_prefix = fname_prefix_merge(dmerge, detname, _tstamp, exp, irun)

    for octype, lst in dic_consts.items():
        mrg_nda = merge_panels(lst)
        logger.info(info_ndarr(mrg_nda, 'merged constants for %s ' % octype))
        fmerge = '%s-%s.txt' % (fmerge_prefix, octype)
        fmt = CTYPE_FMT.get(octype,'%.5f')
        save_ndarray_in_textfile(mrg_nda, fmerge, filemode, fmt)

        if True: # deploy:

          # check opt "-t" if constants need to be deployed with diffiernt time stamp or run number
          # NOTE(review): assumes tstamp is an int here; a string tstamp would raise
          # TypeError on comparison -- confirm against the CLI parser
          use_external_run = tstamp is not None and tstamp<10000
          use_external_ts  = tstamp is not None and tstamp>9999
          tvalid_sec = time_sec_from_stamp(fmt=cc.TSFORMAT_SHORT, time_stamp=str(tstamp))\
                  if use_external_ts else cpdic.get('trun_sec', None)
          # parses as: tstamp if use_external_run else (irun if not use_external_ts else 0)
          ivalid_run = tstamp if use_external_run else irun\
                  if not use_external_ts else 0

          dtype = 'ndarray'

          # NOTE(review): rebinding kwa shadows this function's **kwa; safe only
          # because the original kwargs are no longer read past this point
          kwa = {
            'iofname'    : fmerge,
            'experiment' : exp,
            'ctype'      : octype,
            'dtype'      : dtype,
            'detector'   : detname,
            'detname'    : det_name,
            'longname'   : longname,
            'time_sec'   : tvalid_sec,
            'time_stamp' : str_tstamp(fmt=cc.TSFORMAT, time_sec=int(tvalid_sec)),
            'tsshort'    : str_tstamp(fmt=cc.TSFORMAT_SHORT, time_sec=int(tvalid_sec)),
            'tstamp_orig': cpdic.get('tsrun_dark', None),
            'run'        : ivalid_run,
            'run_end'    : run_end,
            'run_orig'   : irun,
            'version'    : version,
            'comment'    : comment,
            'extpars'    : {'content':'extended parameters dict->json->str',},
            'dettype'    : dettype,
            'dbsuffix'   : dbsuffix
          }

          logger.debug('DEPLOY metadata: %s' % str(kwa))

          data = mu.data_from_file(fmerge, octype, dtype, True)
          logger.debug(info_ndarr(data, 'merged constants loaded from file'))

          if deploy:
            resp = wu.deploy_constants(data, exp, longname, url=cc.URL_KRB, krbheaders=cc.KRBHEADERS, **kwa)
            #id_data_exp, id_data_det, id_doc_exp, id_doc_det = resp if resp is not None

          else:
            logger.warning('TO DEPLOY CONSTANTS ADD OPTION -D')
Beispiel #23
0
    def mask_comb(self,
                  status=True,
                  neighbors=False,
                  edges=False,
                  center=False,
                  calib=False,
                  umask=None,
                  dtype=DTYPE_MASK,
                  **kwa):
        """Returns combined mask controlled by the keyword arguments.
           Parameters
           ----------
           - status   : bool : True  - mask from pixel_status constants,
                                       kwa: status_bits=0xffff - status bits to use in mask.
                                       Status bits show why pixel is considered as bad.
                                       Content of the bitword depends on detector and code version.
                                       It is wise to exclude pixels with any bad status by setting status_bits=0xffff.
                                       kwa: gain_range_inds=(0,1,2,3,4) - list of gain range indexes to merge for epix10ka or jungfrau
           - neighbor : bool : False - mask of neighbors of all bad pixels,
                                       kwa: rad=5 - radial parameter of masked region
                                       kwa: ptrn='r'-rhombus, 'c'-circle, othervise square region around each bad pixel
           - edges    : bool : False - mask edge rows and columns of each panel,
                                       kwa: width=0 or edge_rows=1, edge_cols=1 - number of masked edge rows, columns
           - center   : bool : False - mask center rows and columns of each panel consisting of ASICS (cspad, epix, jungfrau),
                                       kwa: wcenter=0 or center_rows=1, center_cols=1 -
                                       number of masked center rows and columns in the segment,
                                       works for cspad2x1, epix100, epix10ka, jungfrau panels
           - calib    : bool : False - apply user's defined mask from pixel_mask constants
           - umask  : np.array: None - apply user's defined mask from input parameters (shaped as data)

           Returns
           -------
           np.array: dtype=np.uint8, shape as det.raw - mask array of 1 or 0 or None if all switches are False.
        """

        logger.debug('MaskAlgos.mask_comb ---- mask evolution')

        mask = None
        if status:
            status_bits = kwa.get('status_bits', 0xffff)
            gain_range_inds = kwa.get('gain_range_inds',
                                      None)  # (0,1,2,3,4) works for epix10ka
            mask = self.mask_from_status(status_bits=status_bits,
                                         gain_range_inds=gain_range_inds,
                                         dtype=dtype)
            logger.debug(
                info_ndarr(mask, 'in mask_comb after mask_from_status'))


#        if unbond and (self.is_cspad2x2() or self.is_cspad()):
#            mask_unbond = self.mask_geo(par, width=0, wcenter=0, mbits=4) # mbits=4 - unbonded pixels for cspad2x1 segments
#            mask = mask_unbond if mask is None else um.merge_masks(mask, mask_unbond)

        if neighbors and mask is not None:
            rad = kwa.get('rad', 5)
            ptrn = kwa.get('ptrn', 'r')
            mask = um.mask_neighbors(mask, rad=rad, ptrn=ptrn)
            logger.debug(info_ndarr(mask,
                                    'in mask_comb after mask_neighbors:'))

        if edges:
            width = kwa.get('width', 0)
            erows = kwa.get('edge_rows', 1)
            ecols = kwa.get('edge_cols', 1)
            mask_edges = self.mask_edges(
                width=width, edge_rows=erows, edge_cols=ecols,
                dtype=dtype)  # masks each segment edges only
            mask = mask_edges if mask is None else um.merge_masks(
                mask, mask_edges, dtype=dtype)
            logger.debug(info_ndarr(mask, 'in mask_comb after mask_edges:'))

        if center:
            wcent = kwa.get('wcenter', 0)
            crows = kwa.get('center_rows', 1)
            ccols = kwa.get('center_cols', 1)
            mask_center = self.mask_center(wcenter=wcent,
                                           center_rows=crows,
                                           center_cols=ccols,
                                           dtype=dtype)
            mask = mask_center if mask is None else um.merge_masks(
                mask, mask_center, dtype=dtype)
            logger.debug(info_ndarr(mask, 'in mask_comb after mask_center:'))

        if calib:
            mask_calib = self.mask_calib_or_default(dtype=dtype)
            mask = mask_calib if mask is None else um.merge_masks(
                mask, mask_calib, dtype=dtype)
            logger.debug(info_ndarr(mask, 'in mask_comb after mask_calib:'))

        if umask is not None:
            mask = umask if mask is None else um.merge_masks(
                mask, umask, dtype=dtype)

        logger.debug(info_ndarr(mask, 'in mask_comb at exit:'))

        return mask