Example No. 1
def show_qzr_map(  qr, qz, inc_x0, data=None, Nzline=10,Nrline=10  ):
    
    import matplotlib.pyplot as plt    
    import copy
    import matplotlib.cm as mcm
    from matplotlib.colors import LogNorm  # needed for the norm used below
    
    cmap='viridis'
    _cmap = copy.copy((mcm.get_cmap(cmap)))
    _cmap.set_under('w', 0)
    
    
    
    qr_start, qr_end, qr_num = qr.min(),qr.max(), Nzline
    qz_start, qz_end, qz_num = qz.min(),qz.max(), Nrline 
    qr_edge, qr_center = get_qedge(qr_start , qr_end, ( qr_end- qr_start)/(qr_num+100), qr_num )
    qz_edge, qz_center = get_qedge( qz_start,   qz_end,   (qz_end - qz_start)/(qz_num+100 ) ,  qz_num )

    label_array_qz = get_qmap_label( qz, qz_edge)
    label_array_qr = get_qmap_label( qr, qr_edge)
 
    labels_qz, indices_qz = roi.extract_label_indices( label_array_qz  )
    labels_qr, indices_qr = roi.extract_label_indices( label_array_qr  )
    num_qz = len(np.unique( labels_qz ))
    num_qr = len(np.unique( labels_qr ))
     


    fig, ax = plt.subplots()
    if data is None:
        data=qr+qz        
        im = ax.imshow(data, cmap='viridis',origin='lower') 
    else:
        im = ax.imshow(data, cmap='viridis',origin='lower',  norm= LogNorm(vmin=0.001, vmax=1e1)) 

    imr=ax.imshow(label_array_qr, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None  )#,interpolation='nearest',) 
    imz=ax.imshow(label_array_qz, origin='lower' ,cmap='viridis', vmin=0.5,vmax= None )#,interpolation='nearest',) 

    caxr = fig.add_axes([0.81, 0.1, 0.03, .8])  # x, y, width, height
     
    cba = fig.colorbar(im, cax=caxr    )      
    ax.set_xlabel(r'$q_r$', fontsize=18)
    ax.set_ylabel(r'$q_z$',fontsize=18)
 
    zticks,zticks_label  = get_qz_tick_label(qz,label_array_qz)
    #rticks,rticks_label  = get_qr_tick_label(label_array_qr,inc_x0)

    rticks,rticks_label = zip(*sorted(  zip( *get_qr_tick_label( qr, label_array_qr, inc_x0) ))  )

    ax.set_yticks( zticks[::1] )
    yticks =  zticks_label[::1]
    ax.set_yticklabels(yticks, fontsize=9)

    stride = max(1, len(rticks) // 7)  # avoid a zero stride when there are few ticks
    ax.set_xticks( rticks[::stride] )
    xticks =  rticks_label[::stride]
    ax.set_xticklabels(xticks, fontsize=9)


    ax.set_title( 'Q-zr_Map', y=1.03,fontsize=18)
    plt.show()    
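# A minimal, self-contained sketch of the q-binning convention that get_qedge is
# assumed to follow above (equally spaced line centers between q.min() and q.max(),
# each with a narrow width). The helper name qedge_sketch and the exact edge layout
# are illustrative assumptions, not the library code.
import numpy as np

def qedge_sketch(q_start, q_end, q_width, q_num):
    # hypothetical stand-in for get_qedge: q_num narrow bins whose centers are
    # spread evenly between q_start and q_end, returned as interleaved
    # (low, high) edges plus the centers
    centers = np.linspace(q_start, q_end, q_num)
    edges = np.column_stack([centers - q_width / 2, centers + q_width / 2]).ravel()
    return edges, centers

edges, centers = qedge_sketch(0.0, 0.05, 0.05 / 110, 10)
print(edges[:4], centers[:2])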
def autocor_one_time( num_buf,  rois, imgs, num_lev=None, start_img=None, end_img=None,
                    bad_images = None, threshold=None):
    
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a multi-tau code for the one-time correlation function,
    with added handling of bad images whose masked intensities are still
    larger than the threshold
    
    Parameters:
        num_buf: int, number of buffers
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
        imgs: pims sequence, image stack
    Options:
        num_lev: int, number of levels; if None: = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
        start_img: int, default None (=0)
        end_img: int, default None (= len(imgs))
        bad_images: list, default None, list of bad image indices
        threshold: float, default None, maximum intensity threshold; images above it are treated as bad
   
    Return:
    g2, 2D-array, shape as (tau, q)
    tau, 1D-array    
    
    One example:
        
        g2, tau = autocor_one_time( num_buf,  ring_mask, imgsr, num_lev=None,
                                       bad_images=None, threshold= 65500 )
    '''
      
    start_time = time.time()
    #print (dly)
    if start_img is None:
        start_img=0
    if end_img is None:
        try:
            end_img= len(imgs)
        except:
            end_img= imgs.length
            
    #print (start_img, end_img)    
    noframes = end_img - start_img #+ 1
    #print (noframes)
    ring_mask = rois
    if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
    nolev = num_lev
    nobuf =num_buf
    print ( 'The lev number is %s'%num_lev)
    
    dly, dict_dly = delays( num_lev, num_buf, time=1 )
    #print (dly.max())
    lev_leng = np.array( [  len(  dict_dly[i] ) for i in list(dict_dly.keys())   ])
    
    qind, pixelist = roi.extract_label_indices(   ring_mask  )
    noqs = np.max(qind)    
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    nopixels = nopr.sum()     
    start_time = time.time() 
   
    buf =  np.ma.zeros([num_lev,num_buf,nopixels])
    buf.mask = True   
            
    
    cts=np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=int) * num_buf  # integer indices for the ring buffer
    countl = np.array( np.zeros(  num_lev ),dtype='int')  
    
    g2 =  np.zeros( [ noframes, noframes, noqs] )   
    
    G=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    IAP=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    IAF=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    num= np.array( np.zeros(  num_lev ),dtype='int')  
    
    Num= { key: [0]* len(  dict_dly[key] ) for key in list(dict_dly.keys())  }
    print ('Doing g2 calculation of %s frames---'%(noframes ))
    ttx=0     
    #if bad_images is None:bad_images=[]
    for n in range( start_img, end_img ):   ##do the work here  
        img = imgs[n] 
        
    #for n, img in enumerate( imgs):
        img_ = (np.ravel(img))[pixelist]
        
        #print ( img_.max() )
        if threshold is not None:
            if img_.max() >= threshold:
                print ('bad image: %s here!'%n)
                img_ =  np.ma.zeros( len(img_) )
                img_.mask = True    
                
        if bad_images is not None:        
            if n in bad_images:
                print ('bad image: %s here!'%n)
                img_ =  np.ma.zeros( len(img_) )
                img_.mask = True 
        
        
        cur[0]=1+cur[0]%num_buf  # increment buffer  
 
        buf[0, cur[0]-1 ]=  img_

        img=[] #//save space 
        img_=[]
        countl[0] = 1+ countl[0]
 
        process_one_time(lev=0, bufno=cur[0]-1,
            G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly, Num=Num, lev_leng=lev_leng )     
        #time_ind[0].append(  current_img_time   )
        processing=1
        lev=1
        while processing:
            if cts[lev]:
                prev=  1+ (cur[lev-1]-1-1+num_buf)%num_buf
                cur[lev]=  1+ cur[lev]%num_buf
                countl[lev] = 1+ countl[lev] 
 
                bufa = buf[lev-1,prev-1]
                bufb=  buf[lev-1,cur[lev-1]-1] 
                
                if (bufa.data==0).all():
                    buf[lev,cur[lev]-1] =  bufa
                elif (bufb.data==0).all():
                    buf[lev,cur[lev]-1] = bufb 
                else:
                    buf[lev,cur[lev]-1] = ( bufa + bufb ) /2. 
                
                cts[lev]=0                
                t1_idx=   (countl[lev]-1) *2
 
                process_one_time(lev=lev, bufno=cur[lev]-1,
                        G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly,Num =Num, lev_leng=lev_leng )     
 
                lev+=1
                #//Since this level finished, test if there is a next level for processing
                if lev<num_lev:processing = 1
                else:processing = 0                                
            else:
                cts[lev]=1      #// set flag to process next time
                processing=0    #// can stop until more images are accumulated              
 
        
        if noframes >= 10 and n % (noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()                
    #print G.shape    
    if (len(np.where(IAP==0)[0])!=0) and ( 0 not in nopr):
        gmax = np.where(IAP==0)[0][0]        
    else:
        gmax=IAP.shape[0]
    #g2=G/(IAP*IAF)
    #print G
    g2=(G[:gmax]/(IAP[:gmax]*IAF[:gmax]))       
    elapsed_time = time.time() - start_time
    #print (Num)
    print ('Total time: %.2f min' %(elapsed_time/60.))        
    return  g2,dly[:gmax]  #, elapsed_time/60.
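# A quick illustration of the normalization returned above (g2 = G/(IAP*IAF)):
# the sketch below evaluates the same symmetric normalization directly for one
# q bin and one lag on a synthetic intensity trace. The variable names are
# hypothetical and the trace is fake Poisson noise.
import numpy as np

rng = np.random.default_rng(0)
I_t = rng.poisson(5.0, size=1000).astype(float)   # synthetic intensity trace for one q bin
tau = 3
G_lag = np.mean(I_t[:-tau] * I_t[tau:])           # <I(t) I(t+tau)>
IAP_lag = np.mean(I_t[:-tau])                     # "past" average  <I(t)>
IAF_lag = np.mean(I_t[tau:])                      # "future" average <I(t+tau)>
print(G_lag / (IAP_lag * IAF_lag))                # ~1.0 for uncorrelated counts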
def autocor_two_time( num_buf, rois, imgs, num_lev=None, start_img=None, end_img=None    ):
    
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a multi-tau code for two-time correlation function  
    
    Parameters:
        num_buf: int, number of buffers
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
        imgs: pims sequence, image stack
    Options:
        num_lev: int, number of levels; if None: = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
        start_img: int, default None (=0)
        end_img: int, default None (= len(imgs))
        #to be done to deal with bad frames
        #bad_images: list, None,bad_images list
        #threshold: float, None, intensity max threshold, above which is considered as bad images
   
    Return:
    g12, 3D-array, shape as ( len(imgs), len(imgs), q)
       
        
    One example:
        
        g12  = autocor_two_time( num_buf,  ring_mask, imgsr, num_lev=None )
    '''
    

    #print (dly)
    if start_img is None:start_img=0
    if end_img is None:
        try:
            end_img= len(imgs)
        except:
            end_img= imgs.length
            
    #print (start_img, end_img)    
    noframes = end_img - start_img #+ 1
    #print (noframes)
    ring_mask = rois
    if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
    print ( 'The lev number is %s'%num_lev)
    
    dly, dict_dly = delays( num_lev, num_buf, time=1 )
    #print (dly.max())
    
    qind, pixelist = roi.extract_label_indices(   ring_mask  )
    noqs = np.max(qind)    
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    nopixels = nopr.sum() 
    
    start_time = time.time()
    
    buf=np.zeros([num_lev,num_buf,nopixels])  #// matrix of buffers, for store img
    
    
    cts=np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=int) * num_buf  # integer indices for the ring buffer
    countl = np.array( np.zeros(  num_lev ),dtype='int')  
    
    g12 =  np.zeros( [ noframes, noframes, noqs] )      
    
    num= np.array( np.zeros(  num_lev ),dtype='int')          
    time_ind ={key: [] for key in range(num_lev)}   
    
    ttx=0        
    for n in range( start_img, end_img ):   ##do the work here
        
        cur[0]=1+cur[0]%num_buf  # increment buffer  
        img = imgs[n] 
        
        #print ( 'The insert image is %s' %(n) )
    
        buf[0, cur[0]-1 ]=  (np.ravel(img))[pixelist]
        img=[] #//save space 
        countl[0] = 1+ countl[0]
        current_img_time = n - start_img +1
    
        process_two_time(lev=0, bufno=cur[0]-1,n=current_img_time,
                        g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)     
        time_ind[0].append(  current_img_time   )
        processing=1
        lev=1
        while processing:
            if cts[lev]:
                prev=  1+ (cur[lev-1]-1-1+num_buf)%num_buf
                cur[lev]=  1+ cur[lev]%num_buf
                countl[lev] = 1+ countl[lev]                                
                buf[lev,cur[lev]-1] = ( buf[lev-1,prev-1] + buf[lev-1,cur[lev-1]-1] ) /2.
                cts[lev]=0                
                t1_idx=   (countl[lev]-1) *2
                current_img_time = ((time_ind[lev-1])[t1_idx ] +  (time_ind[lev-1])[t1_idx +1 ] )/2. 
                time_ind[lev].append(  current_img_time      )  
                process_two_time(lev=lev, bufno=cur[lev]-1,n=current_img_time,
                        g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)  
                lev+=1
                #//Since this level finished, test if there is a next level for processing
                if lev<num_lev:processing = 1
                else:processing = 0                                
            else:
                cts[lev]=1      #// set flag to process next time
                processing=0    #// can stop until more images are accumulated              
 
        
        if noframes >= 10 and n % (noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()                
    
    
    for q in range(noqs):            
        x0 =  g12[:,:,q]
        g12[:,:,q] = np.tril(x0) +  np.tril(x0).T - np.diag( np.diag(x0) )            
    elapsed_time = time.time() - start_time
    print ('Total time: %.2f min' %(elapsed_time/60.))
    
    
    return g12, elapsed_time/60.
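# Not part of the original code, but a common follow-up: a one-time g2(tau) can be
# estimated from the symmetrized two-time matrix by averaging along its off-diagonals.
# This sketch only assumes g12 has the (noframes, noframes, noqs) shape returned above;
# names and data are illustrative.
import numpy as np

def g2_from_g12_sketch(g12):
    # average the two-time matrix along each off-diagonal (constant tau = |t1 - t2|)
    noframes, _, noqs = g12.shape
    g2 = np.zeros((noframes, noqs))
    for tau in range(noframes):
        for q in range(noqs):
            g2[tau, q] = np.diagonal(g12[:, :, q], offset=tau).mean()
    return g2

g12_demo = np.random.rand(20, 20, 3)
g12_demo = (g12_demo + g12_demo.transpose(1, 0, 2)) / 2.   # symmetrize, like the loop above
print(g2_from_g12_sketch(g12_demo).shape)                  # (20, 3)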
Example No. 4
def multi_tau_auto_corr(num_levels, num_bufs, labels, images):
    # TODO: add start_image and end_image parameters (default None)

    from skxray.core import roi
    from skxray.core import utils as core

    """
    This function computes one-time correlations.
    It uses a scheme to achieve long-time correlations inexpensively
    by downsampling the data, iteratively combining successive frames.
    The longest lag time computed is num_levels * num_bufs.
    Parameters
    ----------
    num_levels : int
        how many generations of downsampling to perform, i.e.,
        the depth of the binomial tree of averaged frames
    num_bufs : int, must be even
        maximum lag step to compute in each generation of
        downsampling
    labels : array
        labeled array of the same shape as the image stack;
        each ROI is represented by a distinct label (i.e., integer)
    images : iterable of 2D arrays
        dimensions are: (rr, cc)
    Returns
    -------
    g2 : array
        matrix of normalized intensity-intensity autocorrelation
        shape (num_levels, number of labels(ROI))
    lag_steps : array
        delay or lag steps for the multiple tau analysis
        shape num_levels
    Notes
    -----
    The normalized intensity-intensity time-autocorrelation function
    is defined as
    :math ::
        g_2(q, t') = \frac{<I(q, t)I(q, t + t')> }{<I(q, t)>^2}
    ; t' > 0
    Here, I(q, t) refers to the scattering strength at the momentum
    transfer vector q in reciprocal space at time t, and the brackets
    <...> refer to averages over time t. The quantity t' denotes the
    delay time
    This implementation is based on code in the language Yorick
    by Mark Sutton, based on published work. [1]_
    References
    ----------
    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
        "Area detector based photon correlation in the regime of
        short data batches: Data reduction for dynamic x-ray
        scattering," Rev. Sci. Instrum., vol 70, p 3274-3289, 2000.
    """
    # In order to calculate correlations for `num_bufs` lag steps, images must
    # be kept for up to the maximum lag step. These are stored in the array
    # `buf`. The algorithm keeps only `num_bufs` frames per level, but several
    # levels are kept in `buf`, and each level has twice the delay times of
    # the next lower one. To avoid needless copying, cyclic storage of images
    # in `buf` is used.

    if num_bufs % 2 != 0:
        raise ValueError("number of channels(number of buffers) in " "multiple-taus (must be even)")

    if hasattr(images, "frame_shape"):
        # Give a user-friendly error if we can detect the shape from pims.
        if labels.shape != images.frame_shape:
            raise ValueError("Shape of the image stack should be equal to" " shape of the labels array")

    # get the pixels in each label
    label_mask, pixel_list = roi.extract_label_indices(labels)

    num_rois = np.max(label_mask)

    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(num_rois + 1))
    num_pixels = num_pixels[1:]

    if np.any(num_pixels == 0):
        raise ValueError(
            "Number of pixels of the required roi's" " cannot be zero, " "num_pixels = {0}".format(num_pixels)
        )

    # G holds the un normalized auto-correlation result. We
    # accumulate computations into G as the algorithm proceeds.
    G = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64)

    # matrix of past intensity normalizations
    past_intensity_norm = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64)

    # matrix of future intensity normalizations
    future_intensity_norm = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64)

    # Ring buffer, a buffer with periodic boundary conditions.
    # Images must be kept for up to the maximum delay in buf.
    buf = np.zeros((num_levels, num_bufs, np.sum(num_pixels)), dtype=np.float64)

    # to track processing each level
    track_level = np.zeros(num_levels)

    # to increment buffer
    cur = np.ones(num_levels, dtype=np.int64)

    # to track how many images processed in each level
    img_per_level = np.zeros(num_levels, dtype=np.int64)

    start_time = time.time()  # used to log the computation time (optionally)

    for n, img in enumerate(images):

        cur[0] = (1 + cur[0]) % num_bufs  # increment buffer

        # Put the image into the ring buffer.
        buf[0, cur[0] - 1] = (np.ravel(img))[pixel_list]

        # Compute the correlations between the first level
        # (undownsampled) frames. This modifies G,
        # past_intensity_norm, future_intensity_norm,
        # and img_per_level in place!
        _process(
            buf,
            G,
            past_intensity_norm,
            future_intensity_norm,
            label_mask,
            num_bufs,
            num_pixels,
            img_per_level,
            level=0,
            buf_no=cur[0] - 1,
        )

        # check whether the number of levels is one, otherwise
        # continue processing the next level
        processing = num_levels > 1

        # Compute the correlations for all higher levels.
        level = 1
        while processing:
            if not track_level[level]:
                track_level[level] = 1
                processing = False
            else:
                prev = 1 + (cur[level - 1] - 2) % num_bufs
                cur[level] = 1 + cur[level] % num_bufs

                buf[level, cur[level] - 1] = (buf[level - 1, prev - 1] + buf[level - 1, cur[level - 1] - 1]) / 2

                # make the track_level zero once that level is processed
                track_level[level] = 0

                # call the _process function for each multi-tau level
                # for multi-tau levels greater than one
                # Again, this is modifying things in place. See comment
                # on previous call above.
                _process(
                    buf,
                    G,
                    past_intensity_norm,
                    future_intensity_norm,
                    label_mask,
                    num_bufs,
                    num_pixels,
                    img_per_level,
                    level=level,
                    buf_no=cur[level] - 1,
                )
                level += 1

                # Checking whether there is next level for processing
                processing = level < num_levels

    # ending time for the process
    end_time = time.time()

    logger.info("Processing time for {0} images took {1} seconds." "".format(n, (end_time - start_time)))

    # the normalization factor
    if len(np.where(past_intensity_norm == 0)[0]) != 0:
        g_max = np.where(past_intensity_norm == 0)[0][0]
    else:
        g_max = past_intensity_norm.shape[0]

    # g2 is normalized G
    g2 = G[:g_max] / (past_intensity_norm[:g_max] * future_intensity_norm[:g_max])

    # Convert from num_levels, num_bufs to lag frames.
    tot_channels, lag_steps = core.multi_tau_lags(num_levels, num_bufs)
    lag_steps = lag_steps[:g_max]

    return g2, lag_steps
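# The docstring above notes that the longest lag is num_levels * num_bufs. The
# standard multi-tau lag pattern behind that statement is sketched below: level 0
# holds num_bufs linear lags, every further level holds num_bufs/2 lags with doubled
# spacing. This is an illustration of the expected pattern, not the actual
# core.multi_tau_lags implementation.
import numpy as np

def multi_tau_lags_sketch(num_levels, num_bufs):
    lags = list(range(num_bufs))                     # level 0: lags 0 .. num_bufs-1
    for lev in range(1, num_levels):
        step = 2 ** lev                              # spacing doubles at each level
        start = (num_bufs // 2 + 1) * step
        lags += list(range(start, num_bufs * step + 1, step))
    return np.array(lags)

print(multi_tau_lags_sketch(num_levels=3, num_bufs=8))
# -> [ 0  1  2  3  4  5  6  7 10 12 14 16 20 24 28 32], i.e. (3+1)*8/2 = 16 lags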
Example No. 5
def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10):

    """ 
    Dec 16, 2015, Y.G.@CHX
    plot a qzr map of a gisaxs image (data) 
    
    Parameters:
        qr:  2-D array, qr of a gisaxs image (data)
        qz:  2-D array, qz of a gisaxs image (data)
        inc_x0:  the incident beam center x 
         
    Options:
        data: 2-D array, a gisaxs image, if None, =qr+qz
        Nzline: int, z-line number
        Nrline: int, r-line number
        
        
    Return:
        zticks: list, z-tick positions in unit of pixel
        zticks_label: list, z-tick positions in unit of real space
        rticks: list, r-tick positions in unit of pixel
        rticks_label: list, r-tick positions in unit of real space
   
    
    Examples:
        
        ticks = show_qzr_map(  qr, qz, inc_x0, data = None, Nzline=10, Nrline= 10   )
        ticks = show_qzr_map(  qr,qz, inc_x0, data = avg_imgmr, Nzline=10,  Nrline=10   )
    """

    import matplotlib.pyplot as plt
    import copy
    import matplotlib.cm as mcm
    from matplotlib.colors import LogNorm  # needed for the norm used below

    cmap = "viridis"
    _cmap = copy.copy((mcm.get_cmap(cmap)))
    _cmap.set_under("w", 0)

    qr_start, qr_end, qr_num = qr.min(), qr.max(), Nzline
    qz_start, qz_end, qz_num = qz.min(), qz.max(), Nrline
    qr_edge, qr_center = get_qedge(qr_start, qr_end, (qr_end - qr_start) / (qr_num + 100), qr_num)
    qz_edge, qz_center = get_qedge(qz_start, qz_end, (qz_end - qz_start) / (qz_num + 100), qz_num)

    label_array_qz = get_qmap_label(qz, qz_edge)
    label_array_qr = get_qmap_label(qr, qr_edge)

    labels_qz, indices_qz = roi.extract_label_indices(label_array_qz)
    labels_qr, indices_qr = roi.extract_label_indices(label_array_qr)
    num_qz = len(np.unique(labels_qz))
    num_qr = len(np.unique(labels_qr))

    fig, ax = plt.subplots()
    if data is None:
        data = qr + qz
        im = ax.imshow(data, cmap="viridis", origin="lower")
    else:
        im = ax.imshow(data, cmap="viridis", origin="lower", norm=LogNorm(vmin=0.001, vmax=1e1))

    imr = ax.imshow(label_array_qr, origin="lower", cmap="viridis", vmin=0.5, vmax=None)  # ,interpolation='nearest',)
    imz = ax.imshow(label_array_qz, origin="lower", cmap="viridis", vmin=0.5, vmax=None)  # ,interpolation='nearest',)

    caxr = fig.add_axes([0.81, 0.1, 0.03, 0.8])  # x, y, width, height

    cba = fig.colorbar(im, cax=caxr)
    ax.set_xlabel(r"$q_r$", fontsize=18)
    ax.set_ylabel(r"$q_z$", fontsize=18)

    zticks, zticks_label = get_qz_tick_label(qz, label_array_qz)
    # rticks,rticks_label  = get_qr_tick_label(label_array_qr,inc_x0)

    rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0))))

    stride = max(1, len(zticks) // 7)  # avoid a zero stride when there are few ticks
    ax.set_yticks(zticks[::stride])
    yticks = zticks_label[::stride]
    ax.set_yticklabels(yticks, fontsize=9)

    stride = max(1, len(rticks) // 7)
    ax.set_xticks(rticks[::stride])
    xticks = rticks_label[::stride]
    ax.set_xticklabels(xticks, fontsize=9)

    ax.set_title("Q-zr_Map", y=1.03, fontsize=18)
    plt.show()
    return zticks, zticks_label, rticks, rticks_label
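# For context, a hypothetical sketch of what get_qmap_label is assumed to do with
# the edges produced above: label every pixel by the (low, high) edge pair its q
# value falls into, with 0 for pixels outside every bin. Numpy-only illustration,
# not the actual helper.
import numpy as np

def qmap_label_sketch(qmap, edges):
    pairs = np.asarray(edges).reshape(-1, 2)          # interleaved (low, high) pairs
    labels = np.zeros(qmap.shape, dtype=int)
    for i, (lo, hi) in enumerate(pairs, start=1):
        labels[(qmap >= lo) & (qmap < hi)] = i
    return labels

qmap = np.linspace(0, 0.05, 100).reshape(10, 10)                      # fake q map
edges = np.repeat(np.linspace(0, 0.05, 11), 2)[1:-1].reshape(-1, 2)   # 10 contiguous bins
print(np.unique(qmap_label_sketch(qmap, edges)))                      # 0 marks pixels outside every bin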
def autocor_one_time( num_buf,  ring_mask, imgs, num_lev=None, start_img=None, end_img=None, bad_images = None, threshold=None):   
    start_time = time.time()
    #print (dly)
    if start_img is None:
        start_img=0
    if end_img is None:
        try:
            end_img= len(imgs)
        except:
            end_img= imgs.length
            
    #print (start_img, end_img)    
    noframes = end_img - start_img #+ 1
    #print (noframes)
    
    if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
    nolev = num_lev
    nobuf =num_buf
    print ( 'The lev number is %s'%num_lev)
    
    dly, dict_dly = delays( num_lev, num_buf, time=1 )
    #print (dly.max())
    lev_leng = np.array( [  len(  dict_dly[i] ) for i in list(dict_dly.keys())   ])
    
    qind, pixelist = roi.extract_label_indices(   ring_mask  )
    noqs = np.max(qind)    
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    nopixels = nopr.sum()     
    start_time = time.time() 
   
    buf =  np.ma.zeros([num_lev,num_buf,nopixels])
    buf.mask = True   
            
    
    cts=np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=int) * num_buf  # integer indices for the ring buffer
    countl = np.array( np.zeros(  num_lev ),dtype='int')  
    
    g2 =  np.zeros( [ noframes, noframes, noqs] )   
    
    G=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    IAP=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    IAF=np.zeros( [(nolev+1)*int(nobuf/2),noqs])
    num= np.array( np.zeros(  num_lev ),dtype='int')  
    
    Num= { key: [0]* len(  dict_dly[key] ) for key in list(dict_dly.keys())  }
    print ('Doing g2 calculation of %s frames---'%(noframes ))
    ttx=0     
    #if bad_images is None:bad_images=[]
    for n in range( start_img, end_img ):   ##do the work here
        
        img = imgs[n] 
        img_ = (np.ravel(img))[pixelist]
        
        #print ( img_.max() )
        if threshold is not None:
            if img_.max() >= threshold:
                print ('bad image: %s here!'%n)
                img_ =  np.ma.zeros( len(img_) )
                img_.mask = True    
                
        if bad_images is not None:        
            if n in bad_images:
                print ('bad image: %s here!'%n)
                img_ =  np.ma.zeros( len(img_) )
                img_.mask = True 
        
        
        cur[0]=1+cur[0]%num_buf  # increment buffer  
 
        buf[0, cur[0]-1 ]=  img_

        img=[] #//save space 
        img_=[]
        countl[0] = 1+ countl[0]
 
        process_one_time(lev=0, bufno=cur[0]-1,
            G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly, Num=Num, lev_leng=lev_leng )     
        #time_ind[0].append(  current_img_time   )
        processing=1
        lev=1
        while processing:
            if cts[lev]:
                prev=  1+ (cur[lev-1]-1-1+num_buf)%num_buf
                cur[lev]=  1+ cur[lev]%num_buf
                countl[lev] = 1+ countl[lev] 
 
                bufa = buf[lev-1,prev-1]
                bufb=  buf[lev-1,cur[lev-1]-1] 
                
                if (bufa.data==0).all():
                    buf[lev,cur[lev]-1] =  bufa
                elif (bufb.data==0).all():
                    buf[lev,cur[lev]-1] = bufb 
                else:
                    buf[lev,cur[lev]-1] = ( bufa + bufb ) /2. 
                
                cts[lev]=0                
                t1_idx=   (countl[lev]-1) *2
 
                process_one_time(lev=lev, bufno=cur[lev]-1,
                        G=G,IAP=IAP,IAF=IAF, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly,Num =Num, lev_leng=lev_leng )     
 
                lev+=1
                #//Since this level finished, test if there is a next level for processing
                if lev<num_lev:processing = 1
                else:processing = 0                                
            else:
                cts[lev]=1      #// set flag to process next time
                processing=0    #// can stop until more images are accumulated              
 
        
        if noframes >= 10 and n % (noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()                
    #print G.shape    
    if (len(np.where(IAP==0)[0])!=0) and ( 0 not in nopr):
        gmax = np.where(IAP==0)[0][0]        
    else:
        gmax=IAP.shape[0]
    #g2=G/(IAP*IAF)
    #print G
    g2=(G[:gmax]/(IAP[:gmax]*IAF[:gmax]))       
    elapsed_time = time.time() - start_time
    #print (Num)
    print ('Total time: %.2f min' %(elapsed_time/60.))        
    return  g2,dly[:gmax]  #, elapsed_time/60.
Example No. 7
def multi_tau_auto_corr(num_levels, num_bufs, labels, images):
    # TODO: add start_image and end_image parameters (default None)

    from skxray.core import roi
    from skxray.core import utils as core
    """
    This function computes one-time correlations.
    It uses a scheme to achieve long-time correlations inexpensively
    by downsampling the data, iteratively combining successive frames.
    The longest lag time computed is num_levels * num_bufs.
    Parameters
    ----------
    num_levels : int
        how many generations of downsampling to perform, i.e.,
        the depth of the binomial tree of averaged frames
    num_bufs : int, must be even
        maximum lag step to compute in each generation of
        downsampling
    labels : array
        labeled array of the same shape as the image stack;
        each ROI is represented by a distinct label (i.e., integer)
    images : iterable of 2D arrays
        dimensions are: (rr, cc)
    Returns
    -------
    g2 : array
        matrix of normalized intensity-intensity autocorrelation
        shape (num_levels, number of labels(ROI))
    lag_steps : array
        delay or lag steps for the multiple tau analysis
        shape num_levels
    Notes
    -----
    The normalized intensity-intensity time-autocorrelation function
    is defined as
    :math ::
        g_2(q, t') = \frac{<I(q, t)I(q, t + t')> }{<I(q, t)>^2}
    ; t' > 0
    Here, I(q, t) refers to the scattering strength at the momentum
    transfer vector q in reciprocal space at time t, and the brackets
    <...> refer to averages over time t. The quantity t' denotes the
    delay time
    This implementation is based on code in the language Yorick
    by Mark Sutton, based on published work. [1]_
    References
    ----------
    .. [1] D. Lumma, L. B. Lurio, S. G. J. Mochrie and M. Sutton,
        "Area detector based photon correlation in the regime of
        short data batches: Data reduction for dynamic x-ray
        scattering," Rev. Sci. Instrum., vol 70, p 3274-3289, 2000.
    """
    # In order to calculate correlations for `num_bufs` lag steps, images must
    # be kept for up to the maximum lag step. These are stored in the array
    # `buf`. The algorithm keeps only `num_bufs` frames per level, but several
    # levels are kept in `buf`, and each level has twice the delay times of
    # the next lower one. To avoid needless copying, cyclic storage of images
    # in `buf` is used.

    if num_bufs % 2 != 0:
        raise ValueError("number of channels(number of buffers) in "
                         "multiple-taus (must be even)")

    if hasattr(images, 'frame_shape'):
        # Give a user-friendly error if we can detect the shape from pims.
        if labels.shape != images.frame_shape:
            raise ValueError("Shape of the image stack should be equal to"
                             " shape of the labels array")

    # get the pixels in each label
    label_mask, pixel_list = roi.extract_label_indices(labels)

    num_rois = np.max(label_mask)

    # number of pixels per ROI
    num_pixels = np.bincount(label_mask, minlength=(num_rois + 1))
    num_pixels = num_pixels[1:]

    if np.any(num_pixels == 0):
        raise ValueError("Number of pixels of the required roi's"
                         " cannot be zero, "
                         "num_pixels = {0}".format(num_pixels))

    # G holds the un normalized auto-correlation result. We
    # accumulate computations into G as the algorithm proceeds.
    G = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64)

    # matrix of past intensity normalizations
    past_intensity_norm = np.zeros(((num_levels + 1) * num_bufs // 2, num_rois),
                                   dtype=np.float64)

    # matrix of future intensity normalizations
    future_intensity_norm = np.zeros(
        ((num_levels + 1) * num_bufs // 2, num_rois), dtype=np.float64)

    # Ring buffer, a buffer with periodic boundary conditions.
    # Images must be kept for up to the maximum delay in buf.
    buf = np.zeros((num_levels, num_bufs, np.sum(num_pixels)),
                   dtype=np.float64)

    # to track processing each level
    track_level = np.zeros(num_levels)

    # to increment buffer
    cur = np.ones(num_levels, dtype=np.int64)

    # to track how many images processed in each level
    img_per_level = np.zeros(num_levels, dtype=np.int64)

    start_time = time.time()  # used to log the computation time (optionally)

    for n, img in enumerate(images):

        cur[0] = (1 + cur[0]) % num_bufs  # increment buffer

        # Put the image into the ring buffer.
        buf[0, cur[0] - 1] = (np.ravel(img))[pixel_list]

        # Compute the correlations between the first level
        # (undownsampled) frames. This modifies G,
        # past_intensity_norm, future_intensity_norm,
        # and img_per_level in place!
        _process(buf,
                 G,
                 past_intensity_norm,
                 future_intensity_norm,
                 label_mask,
                 num_bufs,
                 num_pixels,
                 img_per_level,
                 level=0,
                 buf_no=cur[0] - 1)

        # check whether the number of levels is one, otherwise
        # continue processing the next level
        processing = num_levels > 1

        # Compute the correlations for all higher levels.
        level = 1
        while processing:
            if not track_level[level]:
                track_level[level] = 1
                processing = False
            else:
                prev = 1 + (cur[level - 1] - 2) % num_bufs
                cur[level] = 1 + cur[level] % num_bufs

                buf[level,
                    cur[level] - 1] = (buf[level - 1, prev - 1] +
                                       buf[level - 1, cur[level - 1] - 1]) / 2

                # make the track_level zero once that level is processed
                track_level[level] = 0

                # call the _process function for each multi-tau level
                # for multi-tau levels greater than one
                # Again, this is modifying things in place. See comment
                # on previous call above.
                _process(
                    buf,
                    G,
                    past_intensity_norm,
                    future_intensity_norm,
                    label_mask,
                    num_bufs,
                    num_pixels,
                    img_per_level,
                    level=level,
                    buf_no=cur[level] - 1,
                )
                level += 1

                # Checking whether there is next level for processing
                processing = level < num_levels

    # ending time for the process
    end_time = time.time()

    logger.info("Processing time for {0} images took {1} seconds."
                "".format(n, (end_time - start_time)))

    # the normalization factor
    if len(np.where(past_intensity_norm == 0)[0]) != 0:
        g_max = np.where(past_intensity_norm == 0)[0][0]
    else:
        g_max = past_intensity_norm.shape[0]

    # g2 is normalized G
    g2 = (G[:g_max] /
          (past_intensity_norm[:g_max] * future_intensity_norm[:g_max]))

    # Convert from num_levels, num_bufs to lag frames.
    tot_channels, lag_steps = core.multi_tau_lags(num_levels, num_bufs)
    lag_steps = lag_steps[:g_max]

    return g2, lag_steps
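# The comments above describe cyclic (ring) storage of frames in buf. This
# self-contained sketch uses the same update rule as cur[level] and prev in the
# higher-level branch to show how the write slot wraps around and how prev always
# points at the slot filled on the previous pass; the loop and names are purely
# illustrative.
num_bufs = 4
cur = 1
slots = []
for n in range(8):                        # pretend 8 incoming frames at one level
    cur = 1 + cur % num_bufs              # same update rule as cur[level] above
    prev = 1 + (cur - 2) % num_bufs       # slot written on the previous pass
    slots.append((n, cur - 1, prev - 1))  # (frame, write slot, previous slot)
print(slots)
# -> [(0, 1, 0), (1, 2, 1), (2, 3, 2), (3, 0, 3), (4, 1, 0), ...]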
Example No. 8
def xsvs(image_sets,
         label_array,
         number_of_img,
         timebin_num=2,
         time_bin=None,
         max_cts=None,
         bad_images=None,
         threshold=None):
    """
    This function will provide the probability density of detecting photons
    for different integration times.
    The experimental probability density P(K) of detecting photons K is
    obtained by histogramming the speckle counts over an ensemble of
    equivalent pixels and over a number of speckle patterns recorded
    with the same integration time T under the same condition.
    Parameters
    ----------
    image_sets : array
        sets of images
    label_array : array
        labeled array; 0 is background.
        Each ROI is represented by a distinct label (i.e., integer).
    number_of_img : int
        number of images (how far to go with integration times when finding
        the time_bin, using skxray.utils.geometric function)
    timebin_num : int, optional
        integration time; default is 2
    max_cts : int, optional
       the brightest pixel in any ROI in any image in the image set.
       defaults to using skxray.core.roi.roi_max_counts to determine
       the brightest pixel in any of the ROIs
       
       
    bad_images: array, optional
        list of bad image indices; XSVS will not analyze binned image groups
        that involve any bad image
    threshold: float, optional
        if an image contains a pixel with intensity above threshold, that
        image is treated as a bad image
    
    
    Returns
    -------
    prob_k_all : array
        probability density of detecting photons
    prob_k_std_dev : array
        standard deviation of probability density of detecting photons
    Notes
    -----
    This implementation is based on the following references.
    References: [1]_, [2]_
    .. [1] L. Li, P. Kwasniewski, D. Oris, L Wiegart, L. Cristofolini,
       C. Carona and A. Fluerasu , "Photon statistics and speckle visibility
       spectroscopy with partially coherent x-rays" J. Synchrotron Rad.,
       vol 21, p 1288-1295, 2014.
    .. [2] R. Bandyopadhyay, A. S. Gittings, S. S. Suh, P.K. Dixon and
       D.J. Durian "Speckle-visibilty Spectroscopy: A tool to study
       time-varying dynamics" Rev. Sci. Instrum. vol 76, p  093110, 2005.
    There is an example in https://github.com/scikit-xray/scikit-xray-examples
    It will demonstrate the use of these functions in this module for
    experimental data.
    """
    if max_cts is None:
        max_cts = roi.roi_max_counts(image_sets, label_array)

    # find the label's and pixel indices for ROI's
    labels, indices = roi.extract_label_indices(label_array)
    nopixels = len(indices)
    # number of ROI's
    u_labels = list(np.unique(labels))
    num_roi = len(u_labels)

    # create integration times
    if time_bin is None:
        time_bin = geometric_series(timebin_num, number_of_img)

    # number of times in the time bin
    num_times = len(time_bin)

    # number of pixels per ROI
    num_pixels = np.bincount(labels, minlength=(num_roi + 1))[1:]

    # probability density of detecting photons
    prob_k_all = np.zeros([num_times, num_roi], dtype=object)  # np.object is removed in recent numpy

    # square of probability density of detecting photons
    prob_k_pow_all = np.zeros_like(prob_k_all)

    # standard deviation of probability density of detecting photons
    prob_k_std_dev = np.zeros_like(prob_k_all)

    # get the bin edges for each time bin for each ROI
    bin_edges = np.zeros(prob_k_all.shape[0], dtype=prob_k_all.dtype)
    for i in range(num_times):
        bin_edges[i] = np.arange(max_cts * 2**i)

    start_time = time.time()  # used to log the computation time (optionally)

    for i, images in enumerate(image_sets):
        # Ring buffer, a buffer with periodic boundary conditions.
        # Images must be keep for up to maximum delay in buf.
        #buf = np.zeros([num_times, timebin_num], dtype=np.object)  # matrix of buffers

        buf = np.ma.zeros([num_times, timebin_num, nopixels])
        buf.mask = True

        # to track processing each time level
        track_level = np.zeros(num_times)
        track_bad_level = np.zeros(num_times)
        # to increment buffer
        cur = np.full(num_times, timebin_num)

        # to track how many images processed in each level
        img_per_level = np.zeros(num_times, dtype=np.int64)

        prob_k = np.zeros_like(prob_k_all)
        prob_k_pow = np.zeros_like(prob_k_all)

        try:
            noframes = len(images)
        except:
            noframes = images.length

        #Num= { key: [0]* len(  dict_dly[key] ) for key in list(dict_dly.keys())  }

        for n, img in enumerate(images):
            cur[0] = 1 + cur[0] % timebin_num
            # read each frame
            # Put the image into the ring buffer.

            img_ = (np.ravel(img))[indices]

            if threshold is not None:
                if img_.max() >= threshold:
                    print('bad image: %s here!' % n)
                    img_ = np.ma.zeros(len(img_))
                    img_.mask = True

            if bad_images is not None:
                if n in bad_images:
                    print('bad image: %s here!' % n)
                    img_ = np.ma.zeros(len(img_))
                    img_.mask = True

            buf[0, cur[0] - 1] = img_

            _process(num_roi, 0, cur[0] - 1, buf, img_per_level, labels,
                     max_cts, bin_edges[0], prob_k, prob_k_pow,
                     track_bad_level)

            #print (0, img_per_level)

            # check whether the number of levels is one, otherwise
            # continue processing the next level
            level = 1
            processing = 1
            #print ('track_level: %s'%track_level)
            #while level < num_times:
            #if not track_level[level]:
            #track_level[level] = 1
            while processing:
                if track_level[level]:
                    prev = 1 + (cur[level - 1] - 2) % timebin_num
                    cur[level] = 1 + cur[level] % timebin_num

                    bufa = buf[level - 1, prev - 1]
                    bufb = buf[level - 1, cur[level - 1] - 1]

                    if (bufa.data == 0).all():
                        buf[level, cur[level] - 1] = bufa
                    elif (bufb.data == 0).all():
                        buf[level, cur[level] - 1] = bufb
                    else:
                        buf[level, cur[level] - 1] = bufa + bufb

                    #print (level, cur[level]-1)

                    track_level[level] = 0

                    _process(num_roi, level, cur[level] - 1, buf,
                             img_per_level, labels, max_cts, bin_edges[level],
                             prob_k, prob_k_pow, track_bad_level)
                    level += 1
                    if level < num_times: processing = 1
                    else: processing = 0

                else:
                    track_level[level] = 1
                    processing = 0
                #print ('track_level: %s'%track_level)

            if noframes >= 10 and n % (int(noframes / 10)) == 0:
                sys.stdout.write("#")
                sys.stdout.flush()

            prob_k_all += (prob_k - prob_k_all) / (i + 1)
            prob_k_pow_all += (prob_k_pow - prob_k_pow_all) / (i + 1)

    prob_k_std_dev = np.power((prob_k_pow_all - np.power(prob_k_all, 2)), .5)

    for i in range(num_times):
        if isinstance(prob_k_all[i, 0], float):
            for j in range(len(u_labels)):
                prob_k_all[i, j] = np.array([0] * (len(bin_edges[i]) - 1))
                prob_k_std_dev[i, j] = np.array([0] * (len(bin_edges[i]) - 1))

    logger.info("Processing time for XSVS took %s seconds."
                "", (time.time() - start_time))
    elapsed_time = time.time() - start_time
    #print (Num)
    print('Total time: %.2f min' % (elapsed_time / 60.))

    #print (img_per_level - track_bad_level)
    #print (buf)

    return prob_k_all, prob_k_std_dev
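# The P(K) quantity described in the docstring is, at its core, a histogram of
# integer photon counts over an ensemble of ROI pixels at a fixed integration time.
# A minimal synthetic sketch (fake Poisson counts, hypothetical names, not the
# function above):
import numpy as np

counts = np.random.default_rng(1).poisson(2.0, size=10000)   # fake speckle counts, one ROI, one integration time
bin_edges = np.arange(counts.max() + 2)                      # integer bins 0, 1, 2, ...
prob_k, _ = np.histogram(counts, bins=bin_edges, density=True)
print(prob_k[:5], prob_k.sum())                              # P(K=0..4); probabilities sum to ~1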
def auto_two_Array_g1_norm(data, rois, data_pixel=None):
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a numpy-based method to get the two-time correlation function with a normalization;
    the purpose of this function is to get exactly the same result as the one-time correlation function
    
    Parameters:
        data:  image sequence (e.g., pims), shape as [img_dim0, img_dim1, imgs_length]
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
    
    Options:
        
        data_pixel: 2-D array, shape as (len(images), len(qind)); optional.
                    If None, it is computed with Get_Pixel_Array( ).get_data( )
         
   
    Return:
        g12b_norm: a 3-D array, shape as ( imgs_length, imgs_length, q),
                   the conventional two-time correlation function,
                   same as obtained by auto_two_Array( data, roi, data_pixel=None )
        g12b:      a 3-D array, shape as ( imgs_length, imgs_length, q),
                   the non-normalized two-time correlation function

        norms:     a 2-D array, shape as ( imgs_length, q), a normalization used to later obtain the one-time correlation from the two-time correlation
     
    One example:        
        g12b_norm, g12b_not_norm, norms = auto_two_Array_g1_norm( imgsr, ring_mask, data_pixel = data_pixel ) 
        
    '''

    start_time = time.time()
    qind, pixelist = roi.extract_label_indices(rois)
    noqs = len(np.unique(qind))
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
    if data_pixel is None:
        data_pixel = Get_Pixel_Array(data, pixelist).get_data()
        #print (data_pixel.shape)

    try:
        noframes = len(data)
    except:
        noframes = data.length
    g12b_norm = np.zeros([noframes, noframes, noqs])
    g12b = np.zeros([noframes, noframes, noqs])
    norms = np.zeros([noframes, noqs])

    Unitq = (noqs / 10)
    proi = 0

    for qi in range(1, noqs + 1):
        pixelist_qi = np.where(qind == qi)[0]
        #print (pixelist_qi.shape,  data_pixel[qi].shape)
        data_pixel_qi = data_pixel[:, pixelist_qi]

        sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes)
        sum2 = sum1.T
        #norms_g12  =  sum1 * sum2 * nopr[qi -1]
        norms[:, qi - 1] = sum1

        g12b[:, :, qi - 1] = np.dot(data_pixel_qi, data_pixel_qi.T)
        g12b_norm[:, :,
                  qi - 1] = g12b[:, :, qi - 1] / sum1 / sum2 / nopr[qi - 1]
        #print ( proi, int( qi //( Unitq) ) )
        if int(qi // (Unitq)) == proi:
            sys.stdout.write("#")
            sys.stdout.flush()
            proi += 1

    elapsed_time = time.time() - start_time
    print('Total time: %.2f min' % (elapsed_time / 60.))

    return g12b_norm, g12b, norms
def auto_two_Array(data, rois, data_pixel=None):
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a numpy-based method to get the two-time correlation function
    
    Parameters:
        data:  image sequence (e.g., pims), shape as [img_dim0, img_dim1, imgs_length]
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
    
    Options:
        
        data_pixel: 2-D array, shape as (len(images), len(qind)); optional.
                    If None, it is computed with Get_Pixel_Array( ).get_data( )
         
   
    Return:
        g12: a 3-D array, shape as ( imgs_length, imgs_length, q)
     
    One example:        
        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) 
    '''

    start_time = time.time()

    qind, pixelist = roi.extract_label_indices(rois)
    noqs = len(np.unique(qind))
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]

    if data_pixel is None:
        data_pixel = Get_Pixel_Array(data, pixelist).get_data()
        #print (data_pixel.shape)

    try:
        noframes = len(data)
    except:
        noframes = data.length
    g12b = np.zeros([noframes, noframes, noqs])
    Unitq = (noqs / 10)
    proi = 0

    for qi in range(1, noqs + 1):
        pixelist_qi = np.where(qind == qi)[0]
        #print (pixelist_qi.shape,  data_pixel[qi].shape)
        data_pixel_qi = data_pixel[:, pixelist_qi]

        sum1 = (np.average(data_pixel_qi, axis=1)).reshape(1, noframes)
        sum2 = sum1.T

        g12b[:, :, qi - 1] = np.dot(
            data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1]
        #print ( proi, int( qi //( Unitq) ) )
        if int(qi // (Unitq)) == proi:
            sys.stdout.write("#")
            sys.stdout.flush()
            proi += 1

    elapsed_time = time.time() - start_time
    print('Total time: %.2f min' % (elapsed_time / 60.))

    return g12b
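# A tiny check (synthetic data, illustrative names) of the matrix identity the loop
# above relies on: np.dot(D, D.T)[i, j] is the sum over ROI pixels of I_i * I_j,
# i.e. the unnormalized two-time correlation between frames i and j.
import numpy as np

D = np.random.default_rng(2).random((5, 7))          # 5 frames x 7 ROI pixels
full = D @ D.T                                       # all frame pairs at once
manual = sum(D[2, p] * D[4, p] for p in range(7))    # one pair, summed by hand
print(np.isclose(full[2, 4], manual))                # True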
def autocor_one_time(num_buf,
                     rois,
                     imgs,
                     num_lev=None,
                     start_img=None,
                     end_img=None,
                     bad_images=None,
                     threshold=None):
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a multi-tau code for the one-time correlation function,
    with added handling of bad images whose masked intensities are still
    larger than the threshold
    
    Parameters:
        num_buf: int, number of buffers
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
        imgs: pims sequence, image stack
    Options:
        num_lev: int, number of levels; if None: = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
        start_img: int, default None (=0)
        end_img: int, default None (= len(imgs))
        bad_images: list, default None, list of bad image indices
        threshold: float, default None, maximum intensity threshold; images above it are treated as bad
   
    Return:
    g2, 2D-array, shape as (tau, q)
    tau, 1D-array    
    
    One example:
        
        g2, tau = autocor_one_time( num_buf,  ring_mask, imgsr, num_lev=None,
                                       bad_images=None, threshold= 65500 )
    '''

    start_time = time.time()
    #print (dly)
    if start_img is None:
        start_img = 0
    if end_img is None:
        try:
            end_img = len(imgs)
        except:
            end_img = imgs.length

    #print (start_img, end_img)
    noframes = end_img - start_img  #+ 1
    #print (noframes)
    ring_mask = rois
    if num_lev is None:
        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    nolev = num_lev
    nobuf = num_buf
    print('The lev number is %s' % num_lev)

    dly, dict_dly = delays(num_lev, num_buf, time=1)
    #print (dly.max())
    lev_leng = np.array([len(dict_dly[i]) for i in list(dict_dly.keys())])

    qind, pixelist = roi.extract_label_indices(ring_mask)
    noqs = np.max(qind)
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
    nopixels = nopr.sum()
    start_time = time.time()

    buf = np.ma.zeros([num_lev, num_buf, nopixels])
    buf.mask = True

    cts = np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=int) * num_buf  # integer indices for the ring buffer
    countl = np.array(np.zeros(num_lev), dtype='int')

    g2 = np.zeros([noframes, noframes, noqs])

    G = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    IAP = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    IAF = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    num = np.array(np.zeros(num_lev), dtype='int')

    Num = {key: [0] * len(dict_dly[key]) for key in list(dict_dly.keys())}
    print('Doing g2 calculation of %s frames---' % (noframes))
    ttx = 0
    #if bad_images is None:bad_images=[]
    for n in range(start_img, end_img):  ##do the work here
        img = imgs[n]

        #for n, img in enumerate( imgs):
        img_ = (np.ravel(img))[pixelist]

        #print ( img_.max() )
        if threshold is not None:
            if img_.max() >= threshold:
                print('bad image: %s here!' % n)
                img_ = np.ma.zeros(len(img_))
                img_.mask = True

        if bad_images is not None:
            if n in bad_images:
                print('bad image: %s here!' % n)
                img_ = np.ma.zeros(len(img_))
                img_.mask = True

        cur[0] = 1 + cur[0] % num_buf  # increment buffer

        buf[0, cur[0] - 1] = img_

        img = []  #//save space
        img_ = []
        countl[0] = 1 + countl[0]

        process_one_time(lev=0,
                         bufno=cur[0] - 1,
                         G=G,
                         IAP=IAP,
                         IAF=IAF,
                         buf=buf,
                         num=num,
                         num_buf=num_buf,
                         noqs=noqs,
                         qind=qind,
                         nopr=nopr,
                         dly=dly,
                         Num=Num,
                         lev_leng=lev_leng)
        #time_ind[0].append(  current_img_time   )
        processing = 1
        lev = 1
        while processing:
            if cts[lev]:
                prev = 1 + (cur[lev - 1] - 1 - 1 + num_buf) % num_buf
                cur[lev] = 1 + cur[lev] % num_buf
                countl[lev] = 1 + countl[lev]

                bufa = buf[lev - 1, prev - 1]
                bufb = buf[lev - 1, cur[lev - 1] - 1]

                if (bufa.data == 0).all():
                    buf[lev, cur[lev] - 1] = bufa
                elif (bufb.data == 0).all():
                    buf[lev, cur[lev] - 1] = bufb
                else:
                    buf[lev, cur[lev] - 1] = (bufa + bufb) / 2.

                cts[lev] = 0
                t1_idx = (countl[lev] - 1) * 2

                process_one_time(lev=lev,
                                 bufno=cur[lev] - 1,
                                 G=G,
                                 IAP=IAP,
                                 IAF=IAF,
                                 buf=buf,
                                 num=num,
                                 num_buf=num_buf,
                                 noqs=noqs,
                                 qind=qind,
                                 nopr=nopr,
                                 dly=dly,
                                 Num=Num,
                                 lev_leng=lev_leng)

                lev += 1
                #//Since this level finished, test if there is a next level for processing
                if lev < num_lev: processing = 1
                else: processing = 0
            else:
                cts[lev] = 1  #// set flag to process next time
                processing = 0  #// can stop until more images are accumulated

        if noframes >= 10 and n % (noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()
    #print G.shape
    if (len(np.where(IAP == 0)[0]) != 0) and (0 not in nopr):
        gmax = np.where(IAP == 0)[0][0]
    else:
        gmax = IAP.shape[0]
    #g2=G/(IAP*IAF)
    #print G
    g2 = (G[:gmax] / (IAP[:gmax] * IAF[:gmax]))
    elapsed_time = time.time() - start_time
    #print (Num)
    print('Total time: %.2f min' % (elapsed_time / 60.))
    return g2, dly[:gmax]  #, elapsed_time/60.
def autocor_two_time(num_buf,
                     rois,
                     imgs,
                     num_lev=None,
                     start_img=None,
                     end_img=None):
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a multi-tau code for two-time correlation function  
    
    Parameters:
        num_buf: int, number of buffers
        rois: 2-D array, the ROI(s) of interest; has the same shape as the image; can be rings for SAXS or boxes for GISAXS
        imgs: pims sequence, image stack
    Options:
        num_lev: int, number of levels; if None: = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
        start_img: int, default None (=0)
        end_img: int, default None (= len(imgs))
        #to be done to deal with bad frames
        #bad_images: list, None,bad_images list
        #threshold: float, None, intensity max threshold, above which is considered as bad images
   
    Return:
    g12, 3D-array, shape as ( len(imgs), len(imgs), q)
       
        
    One example:
        
        g12  = autocor_two_time( num_buf,  ring_mask, imgsr, num_lev=None )
    '''

    #print (dly)
    if start_img is None: start_img = 0
    if end_img is None:
        try:
            end_img = len(imgs)
        except:
            end_img = imgs.length

    #print (start_img, end_img)
    noframes = end_img - start_img  #+ 1
    #print (noframes)
    ring_mask = rois
    if num_lev is None:
        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    print('The lev number is %s' % num_lev)

    dly, dict_dly = delays(num_lev, num_buf, time=1)
    #print (dly.max())

    qind, pixelist = roi.extract_label_indices(ring_mask)
    noqs = np.max(qind)
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
    nopixels = nopr.sum()

    start_time = time.time()

    buf = np.zeros([num_lev, num_buf,
                    nopixels])  #// matrix of buffers, for store img

    cts = np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=int) * num_buf  # integer indices for the ring buffer
    countl = np.array(np.zeros(num_lev), dtype='int')

    g12 = np.zeros([noframes, noframes, noqs])

    num = np.array(np.zeros(num_lev), dtype='int')
    time_ind = {key: [] for key in range(num_lev)}

    ttx = 0
    for n in range(start_img, end_img):  ##do the work here

        cur[0] = 1 + cur[0] % num_buf  # increment buffer
        img = imgs[n]

        #print ( 'The insert image is %s' %(n) )

        buf[0, cur[0] - 1] = (np.ravel(img))[pixelist]
        img = []  #//save space
        countl[0] = 1 + countl[0]
        current_img_time = n - start_img + 1

        process_two_time(lev=0,
                         bufno=cur[0] - 1,
                         n=current_img_time,
                         g12=g12,
                         buf=buf,
                         num=num,
                         num_buf=num_buf,
                         noqs=noqs,
                         qind=qind,
                         nopr=nopr,
                         dly=dly)
        time_ind[0].append(current_img_time)
        processing = 1
        lev = 1
        while processing:
            if cts[lev]:
                prev = 1 + (cur[lev - 1] - 1 - 1 + num_buf) % num_buf
                cur[lev] = 1 + cur[lev] % num_buf
                countl[lev] = 1 + countl[lev]
                buf[lev, cur[lev] - 1] = (buf[lev - 1, prev - 1] +
                                          buf[lev - 1, cur[lev - 1] - 1]) / 2.
                cts[lev] = 0
                t1_idx = (countl[lev] - 1) * 2
                current_img_time = ((time_ind[lev - 1])[t1_idx] +
                                    (time_ind[lev - 1])[t1_idx + 1]) / 2.
                time_ind[lev].append(current_img_time)
                process_two_time(lev=lev,
                                 bufno=cur[lev] - 1,
                                 n=current_img_time,
                                 g12=g12,
                                 buf=buf,
                                 num=num,
                                 num_buf=num_buf,
                                 noqs=noqs,
                                 qind=qind,
                                 nopr=nopr,
                                 dly=dly)
                lev += 1
                #//Since this level finished, test if there is a next level for processing
                if lev < num_lev: processing = 1
                else: processing = 0
            else:
                cts[lev] = 1  #// set flag to process next time
                processing = 0  #// can stop until more images are accumulated

        if n % max(1, noframes // 10) == 0:  # print a progress marker ~10 times
            sys.stdout.write("#")
            sys.stdout.flush()

    for q in range(noqs):
        x0 = g12[:, :, q]
        g12[:, :, q] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0))
    elapsed_time = time.time() - start_time
    print('Total time: %.2f min' % (elapsed_time / 60.))

    return g12, elapsed_time / 60.
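A minimal usage sketch for the two-time routine above (assumed names: imgsr is a pims image sequence and ring_mask is a labeled ROI array prepared elsewhere); note that the elapsed run time in minutes is returned as a second value:

num_buf = 8  # multi-tau buffer count (illustrative value)
g12, run_time_min = autocor_two_time(num_buf, ring_mask, imgsr)
# g12[t1, t2, iq] is the two-time correlation of ROI label iq + 1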
Example no. 13
def xsvs(image_sets, label_array, number_of_img, timebin_num=2,
         max_cts=None, bad_images = None, threshold=None):   
    """
    This function will provide the probability density of detecting photons
    for different integration times.
    The experimental probability density P(K) of detecting photons K is
    obtained by histogramming the speckle counts over an ensemble of
    equivalent pixels and over a number of speckle patterns recorded
    with the same integration time T under the same condition.
    Parameters
    ----------
    image_sets : array
        sets of images
    label_array : array
        labeled array; 0 is background.
        Each ROI is represented by a distinct label (i.e., integer).
    number_of_img : int
        number of images (how far to go with integration times when finding
        the time_bin, using skxray.utils.geometric function)
    timebin_num : int, optional
        binning factor used to build the geometric series of integration times; default is 2
    max_cts : int, optional
       the brightest pixel in any ROI in any image in the image set.
       defaults to using skxray.core.roi.roi_max_counts to determine
       the brightest pixel in any of the ROIs
       
       
    bad_images: array, optional
        list of bad-image numbers; XSVS will not analyze any binned image group that involves a bad image
    threshold: float, optional
        if an image contains a pixel with intensity above this threshold, that image is considered bad
    
    
    Returns
    -------
    prob_k_all : array
        probability density of detecting photons
    prob_k_std_dev : array
        standard deviation of probability density of detecting photons
    Notes
    -----
    This implementation is based on the following references.
    References: text [1]_, text [2]_
    .. [1] L. Li, P. Kwasniewski, D. Orsi, L. Wiegart, L. Cristofolini,
       C. Caronna and A. Fluerasu, "Photon statistics and speckle visibility
       spectroscopy with partially coherent x-rays", J. Synchrotron Rad.,
       vol 21, p 1288-1295, 2014.
    .. [2] R. Bandyopadhyay, A. S. Gittings, S. S. Suh, P. K. Dixon and
       D. J. Durian, "Speckle-visibility Spectroscopy: A tool to study
       time-varying dynamics", Rev. Sci. Instrum., vol 76, p 093110, 2005.
    There is an example in https://github.com/scikit-xray/scikit-xray-examples
    that demonstrates the use of the functions in this module on
    experimental data.
    """
    if max_cts is None:
        max_cts = roi.roi_max_counts(image_sets, label_array)

    # find the label's and pixel indices for ROI's
    labels, indices = roi.extract_label_indices(label_array)

    # number of ROI's
    u_labels = list(np.unique(labels))
    num_roi = len(u_labels)

    # create integration times
    time_bin = geometric_series(timebin_num, number_of_img)

    # number of times in the time bin
    num_times = len(time_bin)

    # number of pixels per ROI
    num_pixels = np.bincount(labels, minlength=(num_roi+1))[1:]

    # probability density of detecting photons
    prob_k_all = np.zeros([num_times, num_roi], dtype=object)

    # square of probability density of detecting photons
    prob_k_pow_all = np.zeros_like(prob_k_all)

    # standard deviation of probability density of detecting photons
    prob_k_std_dev = np.zeros_like(prob_k_all)

    # get the bin edges for each time bin for each ROI
    bin_edges = np.zeros(prob_k_all.shape[0], dtype=prob_k_all.dtype)
    for i in range(num_times):
        bin_edges[i] = np.arange(max_cts*2**i)

    start_time = time.time()  # used to log the computation time (optionally)

    for i, images in enumerate(image_sets):
        # Ring buffer, a buffer with periodic boundary conditions.
        # Images must be kept for up to the maximum delay in buf.
        buf = np.zeros([num_times, timebin_num],
                       dtype=object)  # matrix of buffers

        # to track processing each time level
        track_level = np.zeros( num_times )

        # to increment buffer
        cur = np.full(num_times, timebin_num)

        # to track how many images processed in each level
        img_per_level = np.zeros(num_times, dtype=np.int64)

        prob_k = np.zeros_like(prob_k_all)
        prob_k_pow = np.zeros_like(prob_k_all)
 
        try:
            noframes= len(images)
        except:
            noframes= images.length
            
        
        #Num= { key: [0]* len(  dict_dly[key] ) for key in list(dict_dly.keys())  }
        
        for n, img in enumerate(images):
            cur[0] = 1 + cur[0]% timebin_num
            # read each frame
            # Put the image into the ring buffer.
            buf[0, cur[0] - 1] = (np.ravel(img))[indices]
            
            #print (n, cur[0]-1)
            #print (buf.shape)
            

            _process(num_roi, 0, cur[0] - 1, buf, img_per_level, labels,
                     max_cts, bin_edges[0], prob_k, prob_k_pow)
            
            #print (0, img_per_level)

            # check whether the number of levels is one, otherwise
            # continue processing the next level
            level = 1
            processing=1   
            #print ('track_level: %s'%track_level)
            #while level < num_times:
                #if not track_level[level]:
                    #track_level[level] = 1
            while processing:
                if track_level[level]:
                    prev = 1 + (cur[level - 1] - 2) % timebin_num
                    cur[level] = 1 + cur[level] % timebin_num

                    buf[level, cur[level]-1] = (buf[level-1,
                                                    prev-1] +
                                                buf[level-1,
                                                    cur[level - 1] - 1])
                    
                    #print (level, cur[level]-1)
                    
                    
                    track_level[level] = 0

                    _process(num_roi, level, cur[level]-1, buf, img_per_level,
                             labels, max_cts, bin_edges[level], prob_k,
                             prob_k_pow)
                    level += 1
                    if level < num_times:processing = 1
                    else:processing = 0
                    
                else:
                    track_level[level] = 1
                    processing = 0
                #print ('track_level: %s'%track_level)
            
            if n % max(1, noframes // 10) == 0:  # avoid modulo-by-zero for short series
                sys.stdout.write("#")
                sys.stdout.flush() 
            

        # running average over the image sets (accumulated once per image set)
        prob_k_all += (prob_k - prob_k_all)/(i + 1)
        prob_k_pow_all += (prob_k_pow - prob_k_pow_all)/(i + 1)

    prob_k_std_dev = np.power((prob_k_pow_all -
                               np.power(prob_k_all, 2)), .5)

    logger.info("Processing time for XSVS took %s seconds.",
                (time.time() - start_time))
    elapsed_time = time.time() - start_time
    #print (Num)
    print ('Total time: %.2f min' %(elapsed_time/60.)) 
    
    print (img_per_level)
    #print (buf)
    
    return prob_k_all, prob_k_std_dev
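A minimal usage sketch for xsvs (assumed names: img_stack is an image sequence and labeled_roi_array is a labeled ROI array such as the ring ROIs built further below); image_sets is passed as a tuple of sequences:

prob_k_all, prob_k_std_dev = xsvs((img_stack,), labeled_roi_array,
                                  number_of_img=len(img_stack), timebin_num=2)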
def autocor_two_time( num_buf,  ring_mask, imgs, num_lev=None, start_img=None, end_img=None    ):
    # same multi-tau two-time correlation routine as the documented version above
    
    #print (dly)
    if start_img is None:start_img=0
    if end_img is None:
        try:
            end_img= len(imgs)
        except:
            end_img= imgs.length
            
    #print (start_img, end_img)    
    noframes = end_img - start_img #+ 1
    #print (noframes)
    
    if num_lev is None:num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1
    print ( 'The lev number is %s'%num_lev)
    
    dly, dict_dly = delays( num_lev, num_buf, time=1 )
    #print (dly.max())
    
    qind, pixelist = roi.extract_label_indices(   ring_mask  )
    noqs = np.max(qind)    
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]
    nopixels = nopr.sum() 
    
    start_time = time.time()
    
    buf=np.zeros([num_lev,num_buf,nopixels])  #// matrix of buffers, for storing images
    
    
    cts=np.zeros(num_lev)
    cur=np.ones(num_lev, dtype=np.int64) * num_buf  # integer dtype: cur entries are used as buffer indices
    countl = np.array( np.zeros(  num_lev ),dtype='int')  
    
    g12 =  np.zeros( [ noframes, noframes, noqs] )      
    
    num= np.array( np.zeros(  num_lev ),dtype='int')          
    time_ind ={key: [] for key in range(num_lev)}   
    
    ttx=0        
    for n in range( start_img, end_img ):   ##do the work here
        
        cur[0]=1+cur[0]%num_buf  # increment buffer  
        img = imgs[n] 
        
        #print ( 'The insert image is %s' %(n) )
    
        buf[0, cur[0]-1 ]=  (np.ravel(img))[pixelist]
        img=[] #//save space 
        countl[0] = 1+ countl[0]
        current_img_time = n - start_img +1
    
        process_two_time(lev=0, bufno=cur[0]-1,n=current_img_time,
                        g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)     
        time_ind[0].append(  current_img_time   )
        processing=1
        lev=1
        while processing:
            if cts[lev]:
                prev=  1+ (cur[lev-1]-1-1+num_buf)%num_buf
                cur[lev]=  1+ cur[lev]%num_buf
                countl[lev] = 1+ countl[lev]                                
                buf[lev,cur[lev]-1] = ( buf[lev-1,prev-1] + buf[lev-1,cur[lev-1]-1] ) /2.
                cts[lev]=0                
                t1_idx=   (countl[lev]-1) *2
                current_img_time = ((time_ind[lev-1])[t1_idx ] +  (time_ind[lev-1])[t1_idx +1 ] )/2. 
                time_ind[lev].append(  current_img_time      )  
                process_two_time(lev=lev, bufno=cur[lev]-1,n=current_img_time,
                        g12=g12, buf=buf, num=num, num_buf=num_buf, noqs=noqs, qind=qind, nopr=nopr, dly=dly)  
                lev+=1
                #//Since this level finished, test if there is a next level for processing
                if lev<num_lev:processing = 1
                else:processing = 0                                
            else:
                cts[lev]=1      #// set flag to process next time
                processing=0    #// can stop until more images are accumulated              
 
        
        if n % max(1, noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()                
    
    
    for q in range(noqs):            
        x0 =  g12[:,:,q]
        g12[:,:,q] = np.tril(x0) +  np.tril(x0).T - np.diag( np.diag(x0) )            
    elapsed_time = time.time() - start_time
    print ('Total time: %.2f min' %(elapsed_time/60.))
    
    
    return g12, elapsed_time/60.
def auto_two_Array( data, rois, data_pixel=None  ):
    
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a numpy (matrix-operation) method to get the two-time correlation function
    
    Parameters:
        data:  image sequence, shape as [img[0], img[1], imgs_length]
        rois: 2-D array, the region(s) of interest; same shape as the image; can be rings for SAXS, boxes for GISAXS
    
    Options:
        
        data_pixel: if not None,    
                    2-D array, shape as (len(images), len(qind)),
                    obtained with Get_Pixel_Array( data, pixelist ).get_data()
         
   
    Return:
        g12: a 3-D array, shape as ( imgs_length, imgs_length, q)
     
    One example:        
        g12 = auto_two_Array( imgsr, ring_mask, data_pixel = data_pixel ) 
    '''
      
        
    start_time = time.time()
     
    qind, pixelist = roi.extract_label_indices(   rois  )
    noqs = len( np.unique(qind) )
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]    
     
    if data_pixel is None:
        data_pixel =   Get_Pixel_Array( data, pixelist).get_data()
        #print (data_pixel.shape)
    
    try:
        noframes = len(data)
    except:
        noframes = data.length
    g12b = np.zeros(  [noframes, noframes, noqs] )
    Unitq = (noqs/10)
    proi=0
    
    for qi in range(1, noqs + 1 ):
        pixelist_qi =  np.where( qind == qi)[0] 
        #print (pixelist_qi.shape,  data_pixel[qi].shape)
        data_pixel_qi =    data_pixel[:,pixelist_qi]   
        
        sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes   )  
        sum2 = sum1.T       
        
        g12b[:,:,qi -1 ] = np.dot(   data_pixel_qi, data_pixel_qi.T)  /sum1  / sum2  / nopr[qi -1]
        #print ( proi, int( qi //( Unitq) ) )
        if  int( qi //( Unitq) ) == proi:
            sys.stdout.write("#")
            sys.stdout.flush() 
            proi += 1
            
    elapsed_time = time.time() - start_time
    print ('Total time: %.2f min' %(elapsed_time/60.))
    
    return g12b
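The per-ROI loop above is a single matrix identity: for each ROI, g12(t1, t2) = sum_p I(t1, p) I(t2, p) / ( <I(t1)> <I(t2)> N_pixels ). A self-contained sketch of that operation on synthetic data (all names below are illustrative only, not part of the module):

import numpy as np
rng = np.random.default_rng(0)
nframes, npix = 50, 200
I = rng.poisson(5.0, size=(nframes, npix)).astype(float)  # synthetic intensities for one ROI
mean_t = I.mean(axis=1).reshape(1, nframes)               # per-frame mean intensity <I>(t)
g12_one_roi = I @ I.T / mean_t / mean_t.T / npix          # same operation as the qi loop above
assert g12_one_roi.shape == (nframes, nframes)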
def auto_two_Array_g1_norm( data, rois, data_pixel=None  ):
    
    ''' 
    Dec 16, 2015, Y.G.@CHX
    a numpy (matrix-operation) method to get the two-time correlation function with a normalization;
    the purpose of this function is to get exactly the same result as the one-time correlation function
    
    Parameters:
        data:  image sequence, shape as [img[0], img[1], imgs_length]
        rois: 2-D array, the region(s) of interest; same shape as the image; can be rings for SAXS, boxes for GISAXS
    
    Options:
        
        data_pixel: if not None,    
                    2-D array, shape as (len(images), len(qind)),
                    obtained with Get_Pixel_Array( data, pixelist ).get_data()
         
   
    Return:
        g12b_norm: a 3-D array, shape as ( imgs_length, imgs_length, q), 
                   the conventional two-time correlation function,  
                   same as obtained by auto_two_Array( data, roi, data_pixel=None  ) 
        g12b:      a 3-D array, shape as ( imgs_length, imgs_length, q), 
                   the non-normalized two-time correlation function
                   
        norms:     a 2-D array, shape as ( imgs_length,   q), a normalization used later to get the one-time correlation from the two-time correlation  
     
    One example:        
        g12b_norm, g12b_not_norm, norms = auto_two_Array_g1_norm( imgsr, ring_mask, data_pixel = data_pixel ) 
        
    '''
        
        
        
    start_time = time.time()
    qind, pixelist = roi.extract_label_indices(   rois  )
    noqs = len( np.unique(qind) )
    nopr = np.bincount(qind, minlength=(noqs+1))[1:]    
    if data_pixel is None:
        data_pixel =   Get_Pixel_Array( data, pixelist).get_data()
        #print (data_pixel.shape)
    
    try:
        noframes = len(data)
    except:
        noframes = data.length
    g12b_norm = np.zeros(  [noframes, noframes, noqs] )
    g12b = np.zeros(  [noframes, noframes, noqs] )
    norms = np.zeros(  [noframes, noqs] )
    
    Unitq = (noqs/10)
    proi=0
    
    for qi in range(1, noqs + 1 ):
        pixelist_qi =  np.where( qind == qi)[0] 
        #print (pixelist_qi.shape,  data_pixel[qi].shape)
        data_pixel_qi =    data_pixel[:,pixelist_qi]   
        
        sum1 = (np.average( data_pixel_qi, axis=1)).reshape( 1, noframes   )  
        sum2 = sum1.T        
        #norms_g12  =  sum1 * sum2 * nopr[qi -1]
        norms[:,qi -1 ]  =  sum1 
        
        g12b[:,:,qi -1 ] = np.dot(   data_pixel_qi, data_pixel_qi.T)  
        g12b_norm[:,:,qi -1 ] = g12b[:,:,qi -1 ]/ sum1 / sum2 / nopr[qi -1]
        #print ( proi, int( qi //( Unitq) ) )
        if  int( qi //( Unitq) ) == proi:
            sys.stdout.write("#")
            sys.stdout.flush() 
            proi += 1
            
    elapsed_time = time.time() - start_time
    print ('Total time: %.2f min' %(elapsed_time/60.))
    
    return g12b_norm, g12b, norms
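A common follow-up, not shown in this excerpt, is to recover a one-time g2(tau) from the normalized two-time matrix by averaging it along lines of constant lag tau = |t1 - t2|. A minimal sketch, assuming g12b_norm comes from the function above (the helper name is hypothetical):

import numpy as np

def one_time_from_two_time(g12b_norm):
    # average a (T, T, Q) two-time matrix along constant-lag diagonals
    T, _, Q = g12b_norm.shape
    g2 = np.zeros((T, Q))
    for tau in range(T):
        for q in range(Q):
            g2[tau, q] = np.diagonal(g12b_norm[:, :, q], offset=tau).mean()
    return g2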
def autocor_one_time(num_buf,
                     ring_mask,
                     imgs,
                     num_lev=None,
                     start_img=None,
                     end_img=None,
                     bad_images=None,
                     threshold=None):
    start_time = time.time()
    #print (dly)
    if start_img is None:
        start_img = 0
    if end_img is None:
        try:
            end_img = len(imgs)
        except:
            end_img = imgs.length

    #print (start_img, end_img)
    noframes = end_img - start_img  #+ 1
    #print (noframes)

    if num_lev is None:
        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    nolev = num_lev
    nobuf = num_buf
    print('The lev number is %s' % num_lev)

    dly, dict_dly = delays(num_lev, num_buf, time=1)
    #print (dly.max())
    lev_leng = np.array([len(dict_dly[i]) for i in list(dict_dly.keys())])

    qind, pixelist = roi.extract_label_indices(ring_mask)
    noqs = np.max(qind)
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
    nopixels = nopr.sum()
    start_time = time.time()

    buf = np.ma.zeros([num_lev, num_buf, nopixels])
    buf.mask = True

    cts = np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=np.int64) * num_buf  # integer dtype: cur entries are used as buffer indices
    countl = np.array(np.zeros(num_lev), dtype='int')

    #g2 = np.zeros([noframes, noframes, noqs])  # unused: g2 is computed below from G/(IAP*IAF)

    G = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    IAP = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    IAF = np.zeros([(nolev + 1) * int(nobuf / 2), noqs])
    num = np.array(np.zeros(num_lev), dtype='int')

    Num = {key: [0] * len(dict_dly[key]) for key in list(dict_dly.keys())}
    print('Doing g2 calculation of %s frames---' % (noframes))
    ttx = 0
    #if bad_images is None:bad_images=[]
    for n in range(start_img, end_img):  ##do the work here

        img = imgs[n]
        img_ = (np.ravel(img))[pixelist]

        #print ( img_.max() )
        if threshold is not None:
            if img_.max() >= threshold:
                print('bad image: %s here!' % n)
                img_ = np.ma.zeros(len(img_))
                img_.mask = True

        if bad_images is not None:
            if n in bad_images:
                print('bad image: %s here!' % n)
                img_ = np.ma.zeros(len(img_))
                img_.mask = True

        cur[0] = 1 + cur[0] % num_buf  # increment buffer

        buf[0, cur[0] - 1] = img_

        img = []  #//save space
        img_ = []
        countl[0] = 1 + countl[0]

        process_one_time(lev=0,
                         bufno=cur[0] - 1,
                         G=G,
                         IAP=IAP,
                         IAF=IAF,
                         buf=buf,
                         num=num,
                         num_buf=num_buf,
                         noqs=noqs,
                         qind=qind,
                         nopr=nopr,
                         dly=dly,
                         Num=Num,
                         lev_leng=lev_leng)
        #time_ind[0].append(  current_img_time   )
        processing = 1
        lev = 1
        while processing:
            if cts[lev]:
                prev = 1 + (cur[lev - 1] - 1 - 1 + num_buf) % num_buf
                cur[lev] = 1 + cur[lev] % num_buf
                countl[lev] = 1 + countl[lev]

                bufa = buf[lev - 1, prev - 1]
                bufb = buf[lev - 1, cur[lev - 1] - 1]

                # if either contributing frame is a (zeroed, masked) bad frame,
                # propagate it instead of averaging, so the merged frame is also
                # treated as bad at the higher level
                if (bufa.data == 0).all():
                    buf[lev, cur[lev] - 1] = bufa
                elif (bufb.data == 0).all():
                    buf[lev, cur[lev] - 1] = bufb
                else:
                    buf[lev, cur[lev] - 1] = (bufa + bufb) / 2.

                cts[lev] = 0
                t1_idx = (countl[lev] - 1) * 2

                process_one_time(lev=lev,
                                 bufno=cur[lev] - 1,
                                 G=G,
                                 IAP=IAP,
                                 IAF=IAF,
                                 buf=buf,
                                 num=num,
                                 num_buf=num_buf,
                                 noqs=noqs,
                                 qind=qind,
                                 nopr=nopr,
                                 dly=dly,
                                 Num=Num,
                                 lev_leng=lev_leng)

                lev += 1
                #//Since this level finished, test if there is a next level for processing
                if lev < num_lev: processing = 1
                else: processing = 0
            else:
                cts[lev] = 1  #// set flag to process next time
                processing = 0  #// can stop until more images are accumulated

        if n % max(1, noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()
    #print G.shape
    if (len(np.where(IAP == 0)[0]) != 0) and (0 not in nopr):
        gmax = np.where(IAP == 0)[0][0]
    else:
        gmax = IAP.shape[0]
    #g2=G/(IAP*IAF)
    #print G
    g2 = (G[:gmax] / (IAP[:gmax] * IAF[:gmax]))
    elapsed_time = time.time() - start_time
    #print (Num)
    print('Total time: %.2f min' % (elapsed_time / 60.))
    return g2, dly[:gmax]  #, elapsed_time/60.
roi_start = 65 # in pixels
roi_width = 9 # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7. # in pixels
y_center = (129.) # in pixels
num_rings = 3

# get the edges of the rings
edges = roi.ring_edges(roi_start, width=roi_width,
                       spacing=roi_spacing, num_rings=num_rings)

# get the label array from the 3 ring-shaped regions of interest (ROIs)
labeled_roi_array = roi.rings(
    edges, (y_center, x_center), img_stack.shape[1:])

# extract the ROI labels and the pixel indices corresponding to those labels
roi_indices, pixel_list = roi.extract_label_indices(labeled_roi_array)
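A quick sanity check on the ROI definition above (a sketch; the names are the ones defined in this snippet):

import numpy as np
print(edges)                         # one (inner, outer) radius pair per ring
print(np.bincount(roi_indices)[1:])  # pixels per ring label 1..num_rings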


# define the ROIs
roi_start = 65 # in pixels
roi_width = 9 # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7. # in pixels
y_center = (129.) # in pixels
num_rings = 3

# get the edges of the rings
edges = roi.ring_edges(roi_start, width=roi_width,
                       spacing=roi_spacing, num_rings=num_rings)

# get the label array from the 3 ring-shaped regions of interest (ROIs)
labeled_roi_array = roi.rings(
    edges, (y_center, x_center), img_stack.shape[1:])
def autocor_two_time(num_buf,
                     ring_mask,
                     imgs,
                     num_lev=None,
                     start_img=None,
                     end_img=None):
    # same multi-tau two-time correlation routine as the documented version above

    #print (dly)
    if start_img is None: start_img = 0
    if end_img is None:
        try:
            end_img = len(imgs)
        except:
            end_img = imgs.length

    #print (start_img, end_img)
    noframes = end_img - start_img  #+ 1
    #print (noframes)

    if num_lev is None:
        num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1
    print('The lev number is %s' % num_lev)

    dly, dict_dly = delays(num_lev, num_buf, time=1)
    #print (dly.max())

    qind, pixelist = roi.extract_label_indices(ring_mask)
    noqs = np.max(qind)
    nopr = np.bincount(qind, minlength=(noqs + 1))[1:]
    nopixels = nopr.sum()

    start_time = time.time()

    buf = np.zeros([num_lev, num_buf,
                    nopixels])  #// matrix of buffers, for storing images

    cts = np.zeros(num_lev)
    cur = np.ones(num_lev, dtype=np.int64) * num_buf  # integer dtype: cur entries are used as buffer indices
    countl = np.array(np.zeros(num_lev), dtype='int')

    g12 = np.zeros([noframes, noframes, noqs])

    num = np.array(np.zeros(num_lev), dtype='int')
    time_ind = {key: [] for key in range(num_lev)}

    ttx = 0
    for n in range(start_img, end_img):  ##do the work here

        cur[0] = 1 + cur[0] % num_buf  # increment buffer
        img = imgs[n]

        #print ( 'The insert image is %s' %(n) )

        buf[0, cur[0] - 1] = (np.ravel(img))[pixelist]
        img = []  #//save space
        countl[0] = 1 + countl[0]
        current_img_time = n - start_img + 1

        process_two_time(lev=0,
                         bufno=cur[0] - 1,
                         n=current_img_time,
                         g12=g12,
                         buf=buf,
                         num=num,
                         num_buf=num_buf,
                         noqs=noqs,
                         qind=qind,
                         nopr=nopr,
                         dly=dly)
        time_ind[0].append(current_img_time)
        processing = 1
        lev = 1
        while processing:
            if cts[lev]:
                prev = 1 + (cur[lev - 1] - 1 - 1 + num_buf) % num_buf
                cur[lev] = 1 + cur[lev] % num_buf
                countl[lev] = 1 + countl[lev]
                buf[lev, cur[lev] - 1] = (buf[lev - 1, prev - 1] +
                                          buf[lev - 1, cur[lev - 1] - 1]) / 2.
                cts[lev] = 0
                t1_idx = (countl[lev] - 1) * 2
                current_img_time = ((time_ind[lev - 1])[t1_idx] +
                                    (time_ind[lev - 1])[t1_idx + 1]) / 2.
                time_ind[lev].append(current_img_time)
                process_two_time(lev=lev,
                                 bufno=cur[lev] - 1,
                                 n=current_img_time,
                                 g12=g12,
                                 buf=buf,
                                 num=num,
                                 num_buf=num_buf,
                                 noqs=noqs,
                                 qind=qind,
                                 nopr=nopr,
                                 dly=dly)
                lev += 1
                #//Since this level finished, test if there is a next level for processing
                if lev < num_lev: processing = 1
                else: processing = 0
            else:
                cts[lev] = 1  #// set flag to process next time
                processing = 0  #// can stop until more images are accumulated

        if n % max(1, noframes // 10) == 0:
            sys.stdout.write("#")
            sys.stdout.flush()

    for q in range(noqs):
        x0 = g12[:, :, q]
        g12[:, :, q] = np.tril(x0) + np.tril(x0).T - np.diag(np.diag(x0))
    elapsed_time = time.time() - start_time
    print('Total time: %.2f min' % (elapsed_time / 60.))

    return g12, elapsed_time / 60.
Example no. 20
def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10):
    ''' 
    Dec 16, 2015, Y.G.@CHX
    plot the qz-qr map of a GISAXS image (data) 
    
    Parameters:
        qr:  2-D array, qr of a GISAXS image (data)
        qz:  2-D array, qz of a GISAXS image (data)
        inc_x0:  the incident beam center x 
         
    Options:
        data: 2-D array, a GISAXS image; if None, =qr+qz
        Nzline: int, number of z (qz) lines
        Nrline: int, number of r (qr) lines
        
        
    Return:
        zticks: list, z-tick positions in units of pixels
        zticks_label: list, z-tick labels in q units (reciprocal space)
        rticks: list, r-tick positions in units of pixels
        rticks_label: list, r-tick labels in q units (reciprocal space)
   
    
    Examples:
        
        ticks = show_qzr_map(  qr, qz, inc_x0, data = None, Nzline=10, Nrline= 10   )
        ticks = show_qzr_map(  qr,qz, inc_x0, data = avg_imgmr, Nzline=10,  Nrline=10   )
    '''

    import matplotlib.pyplot as plt
    import copy
    import matplotlib.cm as mcm
    from matplotlib.colors import LogNorm  # used for the log-scaled image norm below

    cmap = 'viridis'
    _cmap = copy.copy((mcm.get_cmap(cmap)))
    _cmap.set_under('w', 0)

    qr_start, qr_end, qr_num = qr.min(), qr.max(), Nzline
    qz_start, qz_end, qz_num = qz.min(), qz.max(), Nrline
    qr_edge, qr_center = get_qedge(qr_start, qr_end,
                                   (qr_end - qr_start) / (qr_num + 100),
                                   qr_num)
    qz_edge, qz_center = get_qedge(qz_start, qz_end,
                                   (qz_end - qz_start) / (qz_num + 100),
                                   qz_num)

    label_array_qz = get_qmap_label(qz, qz_edge)
    label_array_qr = get_qmap_label(qr, qr_edge)

    labels_qz, indices_qz = roi.extract_label_indices(label_array_qz)
    labels_qr, indices_qr = roi.extract_label_indices(label_array_qr)
    num_qz = len(np.unique(labels_qz))
    num_qr = len(np.unique(labels_qr))

    fig, ax = plt.subplots()
    if data is None:
        data = qr + qz
        im = ax.imshow(data, cmap='viridis', origin='lower')
    else:
        im = ax.imshow(data,
                       cmap='viridis',
                       origin='lower',
                       norm=LogNorm(vmin=0.001, vmax=1e1))

    imr = ax.imshow(label_array_qr,
                    origin='lower',
                    cmap='viridis',
                    vmin=0.5,
                    vmax=None)  #,interpolation='nearest',)
    imz = ax.imshow(label_array_qz,
                    origin='lower',
                    cmap='viridis',
                    vmin=0.5,
                    vmax=None)  #,interpolation='nearest',)

    caxr = fig.add_axes([0.81, 0.1, 0.03, .8])  # x, y, width, height

    cba = fig.colorbar(im, cax=caxr)
    ax.set_xlabel(r'$q_r$', fontsize=18)
    ax.set_ylabel(r'$q_z$', fontsize=18)

    zticks, zticks_label = get_qz_tick_label(qz, label_array_qz)
    #rticks,rticks_label  = get_qr_tick_label(label_array_qr,inc_x0)

    rticks, rticks_label = zip(
        *sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0))))

    stride = max(1, len(zticks) // 7)  # guard against a zero stride when few ticks exist
    ax.set_yticks(zticks[::stride])
    yticks = zticks_label[::stride]
    ax.set_yticklabels(yticks, fontsize=9)

    stride = max(1, len(rticks) // 7)  # guard against a zero stride when few ticks exist
    ax.set_xticks(rticks[::stride])
    xticks = rticks_label[::stride]
    ax.set_xticklabels(xticks, fontsize=9)

    ax.set_title('Q-zr_Map', y=1.03, fontsize=18)
    plt.show()
    return zticks, zticks_label, rticks, rticks_label
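A minimal usage sketch (assumed names qr, qz, inc_x0 and avg_imgmr, as in the docstring examples), unpacking the four returned tick lists:

zticks, zticks_label, rticks, rticks_label = show_qzr_map(qr, qz, inc_x0, data=avg_imgmr)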
Example no. 21
roi_start = 65  # in pixels
roi_width = 9  # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7.  # in pixels
y_center = (129.)  # in pixels
num_rings = 3

# get the edges of the rings
edges = roi.ring_edges(roi_start,
                       width=roi_width,
                       spacing=roi_spacing,
                       num_rings=num_rings)

# get the label array from the 3 ring-shaped regions of interest (ROIs)
labeled_roi_array = roi.rings(edges, (y_center, x_center), img_stack.shape[1:])

# extract the ROI labels and the pixel indices corresponding to those labels
roi_indices, pixel_list = roi.extract_label_indices(labeled_roi_array)

# define the ROIs
roi_start = 65  # in pixels
roi_width = 9  # in pixels
roi_spacing = (5.0, 4.0)
x_center = 7.  # in pixels
y_center = (129.)  # in pixels
num_rings = 3

# get the edges of the rings
edges = roi.ring_edges(roi_start,
                       width=roi_width,
                       spacing=roi_spacing,
                       num_rings=num_rings)