def __init__(self, FD, bins=100):
    '''
    FD: the file handler of a compressed Eiger frame series
    bins: the number of frames averaged into each bin
    '''
    self.FD = FD
    if (FD.end - FD.beg) % bins:
        print('Please choose a bins value such that (FD.end - FD.beg) / bins is an integer.')
    else:
        self.bins = bins
        self.md = FD.md
        #self.beg = FD.beg
        self.beg = 0
        Nimg = (FD.end - FD.beg)
        slice_num = Nimg // bins
        self.end = slice_num
        self.time_edge = np.array(create_time_slice(N=Nimg,
                                  slice_num=slice_num, slice_width=bins)) + FD.beg
        self.get_bin_frame()
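# The constructor above assumes that create_time_slice(N, slice_num, slice_width)
# returns an array of (start, stop) frame-index pairs, one per bin, suitable for
# slicing images[start:stop]. The helper below is a minimal sketch of that
# assumed behavior, for illustration only; the actual create_time_slice in the
# source package may differ.
def create_time_slice_sketch(N, slice_num, slice_width):
    '''Illustrative stand-in for create_time_slice: split N frames into
    slice_num consecutive bins of slice_width frames and return (start, stop) pairs.'''
    edges = [(i * slice_width, min((i + 1) * slice_width, N))
             for i in range(slice_num)]
    return np.array(edges)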
def init_compress_eigerdata( images, mask, md, filename, 
                        bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, 
                            bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True  ):    
    '''
        Compress the Eiger data

        Create a new mask by removing hot pixels
        Compute the average image
        Compute the sum of each image
        Find bad_frame_list where the image sum is above bad_pixel_threshold
        Generate a compressed data file with the given filename

        if bins != 1, the images will be binned by the given number of bins

        Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
           'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
           bytes per pixel (either 2 or 4 (Default)),
           Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]

        Return
            mask
            avg_img
            imgsum
            bad_frame_list

    '''
    fp = open( filename,'wb' )
    #Make Header 1024 bytes   
    #md = images.md
    if bins!=1:
        nobytes=8
        
    Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
                        md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
                        md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
                        nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
                         0, md['pixel_mask'].shape[1],
                         0, md['pixel_mask'].shape[0]                
                    )
      
    fp.write( Header)  
    
    Nimg_ = len( images)
    avg_img = np.zeros_like(images[0], dtype=np.float64)  # np.float is deprecated; use np.float64
    Nopix =  float( avg_img.size )
    n=0
    good_count = 0
    frac = 0.0
    if nobytes==2:
        dtype= np.int16
    elif nobytes==4:
        dtype= np.int32
    elif nobytes==8:
        dtype=np.float64
    else:
        print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
        dtype= np.int32        
        
        
    Nimg =   Nimg_//bins 
    time_edge = np.array(create_time_slice( N= Nimg_, 
                                    slice_num= Nimg, slice_width= bins ))    
    
    imgsum  =  np.zeros(    Nimg   )         
    if bins!=1:
        print('The frames will be binned by %s'%bins) 
     
    for n in  tqdm( range(Nimg) ):            
        t1,t2 = time_edge[n]
        img = np.average(  images[t1:t2], axis=0   )        
        mask &= img < hot_pixel_threshold   
        p = np.where( (np.ravel(img)>0) &  np.ravel(mask) )[0] #don't use masked data  
        v = np.ravel( np.array( img, dtype= dtype )) [p]
        dlen = len(p)         
        imgsum[n] = v.sum()
        if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
        #if imgsum[n] >=bad_pixel_threshold :
            dlen = 0
            fp.write(  struct.pack( '@I', dlen  ))    
        else:      
            np.ravel(avg_img )[p] +=   v
            good_count +=1 
            frac += dlen/Nopix
            #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
            fp.write(  struct.pack( '@I', dlen   ))
            fp.write(  struct.pack( '@{}i'.format( dlen), *p))
            if bins==1:
                fp.write(  struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) 
            else:
                fp.write(  struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2]  ), *v)) 
        #n +=1     
        
    fp.close() 
    frac /=good_count
    print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) ) 
    avg_img /= good_count
    
    bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)  )[0]
    #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold  )[0]
    #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold  )[0]
    #bad_frame_list =   np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )
    
    
    if len(bad_frame_list):
        print('Bad frame list is: %s' % bad_frame_list)
    else:
        print('No bad frames were found.')
    if  with_pickle:
        pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )    
    return   mask, avg_img, imgsum, bad_frame_list
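# A hedged usage sketch for init_compress_eigerdata with small synthetic data.
# Everything below (the _demo_init_compress name, the fake frames, and the
# placeholder metadata values) is illustrative only; real Eiger metadata would
# come from the experiment, and only the keys required by the header packing
# above are filled in.
def _demo_init_compress(tmp_filename='/tmp/demo_eiger.cmp'):
    rng = np.random.default_rng(0)
    images = rng.poisson(0.05, size=(10, 64, 64)).astype(np.int32)   # 10 fake 64x64 frames
    mask = np.ones((64, 64), dtype=bool)                             # keep every pixel
    md = {'beam_center_x': 32.0, 'beam_center_y': 32.0, 'count_time': 0.001,
          'detector_distance': 4.8, 'frame_time': 0.00134,
          'incident_wavelength': 1.28, 'x_pixel_size': 7.5e-5,
          'y_pixel_size': 7.5e-5, 'pixel_mask': np.zeros((64, 64), dtype=np.int32)}
    # Returns (mask, avg_img, imgsum, bad_frame_list): avg_img is the average
    # over good frames, imgsum[n] the total intensity of frame n, and
    # bad_frame_list the frames whose sum fell outside the thresholds.
    return init_compress_eigerdata(images, mask, md, tmp_filename,
                                   bins=1, with_pickle=False)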
def segment_compress_eigerdata( images,  mask, md, filename, 
                        bad_pixel_threshold=1e15, hot_pixel_threshold=2**30, 
                            bad_pixel_low_threshold=0, nobytes=4, bins=1, 
                               N1=None, N2=None, dtypes='images',reverse =True    ):     
    '''
    Create a compressed Eiger data file without a header; this function is intended for parallel compression.
    For parallel compression, do not pass any non-scalar parameters.
    '''
    
    if dtypes=='uid':
        uid= md['uid'] #images
        detector = get_detector( db[uid ] )
        images = load_data( uid, detector, reverse= reverse    )[N1:N2] 
        
    print(N1,N2)
    
    
    Nimg_ = len( images) 
    
    M,N = images[0].shape
    avg_img = np.zeros([M, N], dtype=np.float64)  # np.float is deprecated; use np.float64
    Nopix =  float( avg_img.size )
    n=0
    good_count = 0
    #frac = 0.0
    if nobytes==2:
        dtype= np.int16
    elif nobytes==4:
        dtype= np.int32
    elif nobytes==8:
        dtype=np.float64
    else:
        print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
        dtype= np.int32     
        
        
    #Nimg =   Nimg_//bins 
    Nimg = int( np.ceil( Nimg_ / bins  ) )
    time_edge = np.array(create_time_slice( N= Nimg_, 
                                    slice_num= Nimg, slice_width= bins ))
    #print( time_edge, Nimg_, Nimg, bins, N1, N2 )
    imgsum  =  np.zeros(    Nimg   )    
    
    if bins!=1:
        print('The frames will be binned by %s'%bins) 
        
    fp = open( filename,'wb' )    
    for n in   range(Nimg):            
        t1,t2 = time_edge[n]  
        if bins!=1:
            img = np.array( np.average(  images[t1:t2], axis=0   )   , dtype=np.float64)  #dtype=np.int32)
        else:
            img =   np.array( images[t1], dtype=np.int32) 
        mask &= img < hot_pixel_threshold         
        p = np.where( (np.ravel(img)>0) *  np.ravel(mask) )[0] #don't use masked data 
        v = np.ravel( np.array( img, dtype= dtype )) [p] 
        dlen = len(p)         
        imgsum[n] = v.sum()  
        if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
            dlen = 0
            fp.write(  struct.pack( '@I', dlen  ))    
        else:              
            np.ravel( avg_img )[p] +=   v            
            good_count +=1             
            fp.write(  struct.pack( '@I', dlen   ))
            fp.write(  struct.pack( '@{}i'.format( dlen), *p))
            if bins==1:
                fp.write(  struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v)) 
            else:
                fp.write(  struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2]  ), *v))        #n +=1
        del  p,v, img 
        fp.flush()         
    fp.close()      
    avg_img /= good_count 
    bad_frame_list =  (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)
    sys.stdout.write('#')    
    sys.stdout.flush()    
    #del  images, mask, avg_img, imgsum, bad_frame_list
    #print( 'Should release memory here')    
    return   mask, avg_img, imgsum, bad_frame_list
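# Each frame written by the compressors above is a small record:
#   uint32 dlen | dlen x int32 pixel indices | dlen x values,
# where the value type follows nobytes (2 -> int16, 4 -> int32) and binned data
# (bins != 1) is written as float64. The reader below is a hedged sketch whose
# layout is inferred from the struct.pack calls above; it is not the package's
# official reader.
def read_one_frame_record(fp, nobytes=4, binned=False):
    '''Read a single (dlen, positions, values) record from an open file object
    and return the (positions, values) arrays.'''
    dlen = struct.unpack('@I', fp.read(4))[0]
    if dlen == 0:                                   # bad or empty frame
        return np.array([], dtype=np.int32), np.array([])
    pos = np.frombuffer(fp.read(4 * dlen), dtype=np.int32)
    if binned:                                      # binned frames are stored as doubles
        vals = np.frombuffer(fp.read(8 * dlen), dtype=np.float64)
    else:
        vdtype = np.int16 if nobytes == 2 else np.int32
        vals = np.frombuffer(fp.read(nobytes * dlen), dtype=vdtype)
    return pos, vals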
def init_compress_eigerdata(images,
                            mask,
                            md,
                            filename,
                            bad_pixel_threshold=1e15,
                            hot_pixel_threshold=2**30,
                            bad_pixel_low_threshold=0,
                            nobytes=4,
                            bins=1,
                            with_pickle=True,
                            direct_load_data=False,
                            data_path=None):
    '''
        Compress the Eiger data

        Create a new mask by removing hot pixels
        Compute the average image
        Compute the sum of each image
        Find bad_frame_list where the image sum is above bad_pixel_threshold
        Generate a compressed data file with the given filename

        if bins != 1, the images will be binned by the given number of bins

        Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
           'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
           bytes per pixel (either 2 or 4 (Default)),
           Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]

        Return
            mask
            avg_img
            imgsum
            bad_frame_list

    '''
    fp = open(filename, 'wb')
    #Make Header 1024 bytes
    #md = images.md
    if bins != 1:
        nobytes = 8

    Header = struct.pack('@16s8d7I916x', b'Version-COMP0001',
                         md['beam_center_x'], md['beam_center_y'],
                         md['count_time'], md['detector_distance'],
                         md['frame_time'], md['incident_wavelength'],
                         md['x_pixel_size'], md['y_pixel_size'], nobytes,
                         md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
                         0, md['pixel_mask'].shape[1], 0,
                         md['pixel_mask'].shape[0])

    fp.write(Header)

    Nimg_ = len(images)
    avg_img = np.zeros_like(images[0], dtype=np.float64)  # np.float is deprecated; use np.float64
    Nopix = float(avg_img.size)
    n = 0
    good_count = 0
    frac = 0.0
    if nobytes == 2:
        dtype = np.int16
    elif nobytes == 4:
        dtype = np.int32
    elif nobytes == 8:
        dtype = np.float64
    else:
        print("Unsupported nobytes value; only 2 [np.int16], 4 [np.int32], "
              "or 8 [np.float64] are supported; falling back to np.int32.")
        dtype = np.int32

    Nimg = Nimg_ // bins
    time_edge = np.array(
        create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins))

    imgsum = np.zeros(Nimg)
    if bins != 1:
        print('The frames will be binned by %s' % bins)

    for n in tqdm(range(Nimg)):
        t1, t2 = time_edge[n]
        img = np.average(images[t1:t2], axis=0)
        mask &= img < hot_pixel_threshold
        p = np.where((np.ravel(img) > 0)
                     & np.ravel(mask))[0]  #don't use masked data
        v = np.ravel(np.array(img, dtype=dtype))[p]
        dlen = len(p)
        imgsum[n] = v.sum()
        if (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=
                                                 bad_pixel_low_threshold):
            #if imgsum[n] >=bad_pixel_threshold :
            dlen = 0
            fp.write(struct.pack('@I', dlen))
        else:
            np.ravel(avg_img)[p] += v
            good_count += 1
            frac += dlen / Nopix
            #s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
            fp.write(struct.pack('@I', dlen))
            fp.write(struct.pack('@{}i'.format(dlen), *p))
            if bins == 1:
                fp.write(
                    struct.pack('@{}{}'.format(dlen, 'ih'[nobytes == 2]), *v))
            else:
                fp.write(
                    struct.pack('@{}{}'.format(dlen, 'dd'[nobytes == 2]), *v))
        #n +=1

    fp.close()
    frac /= good_count
    print("The fraction of pixel occupied by photon is %6.3f%% " %
          (100 * frac))
    avg_img /= good_count

    bad_frame_list = np.where((np.array(imgsum) > bad_pixel_threshold) | (
        np.array(imgsum) <= bad_pixel_low_threshold))[0]
    #bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold  )[0]
    #bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold  )[0]
    #bad_frame_list =   np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )

    if len(bad_frame_list):
        print('Bad frame list is: %s' % bad_frame_list)
    else:
        print('No bad frames were found.')
    if with_pickle:
        pkl.dump([mask, avg_img, imgsum, bad_frame_list],
                 open(filename + '.pkl', 'wb'))
    return mask, avg_img, imgsum, bad_frame_list
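# The 1024-byte header written above uses the struct format '@16s8d7I916x':
# a 16-byte magic string, eight float64 metadata fields, seven uint32 geometry
# fields, and 916 padding bytes. The helper below is a hedged sketch of decoding
# it; the field names follow the docstring above, and this is not the package's
# official reader.
def read_compressed_header(fp):
    '''Unpack the 1024-byte header from an open compressed-data file object.'''
    keys = ('beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
            'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
            'bytes_per_pixel', 'nrows', 'ncols', 'rows_begin', 'rows_end',
            'cols_begin', 'cols_end')
    fields = struct.unpack('@16s8d7I916x', fp.read(1024))
    magic, values = fields[0], fields[1:]
    return magic, dict(zip(keys, values))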
def segment_compress_eigerdata(images,
                               mask,
                               md,
                               filename,
                               bad_pixel_threshold=1e15,
                               hot_pixel_threshold=2**30,
                               bad_pixel_low_threshold=0,
                               nobytes=4,
                               bins=1,
                               N1=None,
                               N2=None,
                               dtypes='images',
                               reverse=True,
                               direct_load_data=False,
                               data_path=None):
    '''
    Create a compressed Eiger data file without a header; this function is intended for parallel compression.
    For parallel compression, do not pass any non-scalar parameters.
    '''

    if dtypes == 'uid':
        uid = md['uid']  #images
        if not direct_load_data:
            detector = get_detector(db[uid])
            images = load_data(uid, detector, reverse=reverse)[N1:N2]
        else:
            images = EigerImages(data_path, md)[N1:N2]
    Nimg_ = len(images)
    M, N = images[0].shape
    avg_img = np.zeros([M, N], dtype=np.float64)  # np.float is deprecated; use np.float64
    Nopix = float(avg_img.size)
    n = 0
    good_count = 0
    #frac = 0.0
    if nobytes == 2:
        dtype = np.int16
    elif nobytes == 4:
        dtype = np.int32
    elif nobytes == 8:
        dtype = np.float64
    else:
        print("Unsupported nobytes value; only 2 [np.int16], 4 [np.int32], "
              "or 8 [np.float64] are supported; falling back to np.int32.")
        dtype = np.int32

    #Nimg =   Nimg_//bins
    Nimg = int(np.ceil(Nimg_ / bins))
    time_edge = np.array(
        create_time_slice(N=Nimg_, slice_num=Nimg, slice_width=bins))
    #print( time_edge, Nimg_, Nimg, bins, N1, N2 )
    imgsum = np.zeros(Nimg)
    if bins != 1:
        #print('The frames will be binned by %s'%bins)
        dtype = np.float64

    fp = open(filename, 'wb')
    for n in range(Nimg):
        t1, t2 = time_edge[n]
        if bins != 1:
            img = np.array(np.average(images[t1:t2], axis=0), dtype=dtype)
        else:
            img = np.array(images[t1], dtype=dtype)
        mask &= img < hot_pixel_threshold
        p = np.where(
            (np.ravel(img) > 0) * np.ravel(mask))[0]  #don't use masked data
        v = np.ravel(np.array(img, dtype=dtype))[p]
        dlen = len(p)
        imgsum[n] = v.sum()
        if (dlen == 0) or (imgsum[n] > bad_pixel_threshold) or (
                imgsum[n] <= bad_pixel_low_threshold):
            dlen = 0
            fp.write(struct.pack('@I', dlen))
        else:
            np.ravel(avg_img)[p] += v
            good_count += 1
            fp.write(struct.pack('@I', dlen))
            fp.write(struct.pack('@{}i'.format(dlen), *p))
            if bins == 1:
                fp.write(
                    struct.pack('@{}{}'.format(dlen, 'ih'[nobytes == 2]), *v))
            else:
                fp.write(
                    struct.pack('@{}{}'.format(dlen, 'dd'[nobytes == 2]),
                                *v))  #n +=1
        del p, v, img
        fp.flush()
    fp.close()
    avg_img /= good_count
    bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (
        np.array(imgsum) <= bad_pixel_low_threshold)
    sys.stdout.write('#')
    sys.stdout.flush()
    #del  images, mask, avg_img, imgsum, bad_frame_list
    #print( 'Should release memory here')
    return mask, avg_img, imgsum, bad_frame_list
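# The segment compressor above is meant for parallel use: each worker compresses
# a slice [N1:N2] into its own file and returns (mask, avg_img, imgsum,
# bad_frame_flags). The merge below is a hedged sketch of how those per-segment
# results might be combined; it assumes the per-segment good-frame counts are
# tracked separately (the function itself does not return good_count), and the
# actual combination step in the source package may differ.
def combine_segment_results(results, good_counts):
    '''results: list of (mask, avg_img, imgsum, bad_frame_flags) per segment;
    good_counts: number of good frames behind each partial avg_img.'''
    masks, avgs, sums, bads = zip(*results)
    mask = np.logical_and.reduce(masks)                 # a pixel is good only if good in every segment
    total_good = float(sum(good_counts))
    avg_img = sum(a * g for a, g in zip(avgs, good_counts)) / total_good
    imgsum = np.concatenate(sums)                       # per-frame sums in acquisition order
    bad_frame_list = np.where(np.concatenate(bads))[0]  # boolean flags -> frame indices
    return mask, avg_img, imgsum, bad_frame_list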