Example 1
def asmemmaps(basename, video, count = None):
    """Loads multi-frame video into numpy memmaps. 
    
    Actual data is written to numpy files with the provided basename and
    subscripted by source identifier (index), e.g. "basename_0.npy" and "basename_1.npy"
    in the case of a dual-frame video source.
     
    Parameters
    ----------
    basename : str
       Base name for the filenames of the videos. 
    video : iterable
       A multi-frame iterator object.
    count : int, optional
       Defines how many multi-frames are in the video. If not provided, it is
       determined with len().
       
    Returns
    -------
    out : tuple of arrays
        A tuple of memmapped array(s) representing video(s)
    """
    
    if count is None:
        try:
            count = len(video)
        except TypeError:
            raise ValueError("You must provide count")
        
    def _load(array, frame):
        array[...] = frame
        
    def _empty_arrays(frames):
        return tuple(
            np.lib.format.open_memmap(
                basename + "_{}.npy".format(i), "w+",
                shape=(count,) + frame.shape, dtype=frame.dtype)
            for i, frame in enumerate(frames))

    print1("Writing to memmap...")
    print_progress(0, count)
    
    #video may be a plain iterable; make sure we have an iterator before next()
    video = iter(video)

    frames = next(video)
    out = _empty_arrays(frames)
    for i, frame in enumerate(frames):
        _load(out[i][0], frame)
    for j, frames in enumerate(video):
        print_progress(j + 1, count)
        for i, frame in enumerate(frames):
            _load(out[i][j + 1], frame)
    
    print_progress(count, count)   
    return out
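A minimal usage sketch (not from the source): the random single-frame generator below is a stand-in for a real video source, and the "demo" basename is arbitrary. It assumes asmemmaps and the print helpers it relies on are importable (in the cddm package they live in cddm.video and cddm.print_tools).

import numpy as np

#each item of the video is a tuple of frames; here a single-frame source
video = ((np.random.rand(8, 8),) for _ in range(16))
arrays = asmemmaps("demo", video, count=16)   #writes "demo_0.npy"
print(arrays[0].shape)   #(16, 8, 8)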
Example 2
def asarrays(video, count = None):
    """Loads multi-frame video into numpy arrays. 
     
    Parameters
    ----------
    video : iterable
        A multi-frame iterator object.
    count : int, optional
        Defines how many multi-frames are in the video. If not provided, the
        length is determined with len(); if that is not possible, a
        ValueError is raised.
       
    Returns
    -------
    out : tuple of arrays
        A tuple of array(s) representing video(s)
    """
    
    t0 = time.time()
    
    def _load(array, frame):
        array[...] = frame
        
    print1("Loading array...")
    
    if count is None:
        try:
            count = len(video)
        except TypeError:
            raise ValueError("You must provide count")

    print_progress(0, count)
    
    video = iter(video)
    
    frames = next(video)
    out = tuple(np.empty(shape=(count,) + frame.shape, dtype=frame.dtype)
                for frame in frames)
    for i, frame in enumerate(frames):
        _load(out[i][0], frame)
    for j, frames in enumerate(video):
        print_progress(j + 1, count)
        for i, frame in enumerate(frames):
            _load(out[i][j + 1], frame)
        
    print_progress(count, count)
    print_frame_rate(count,t0)
    return out
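A minimal usage sketch (not from the source): a dual-frame source is simulated with tuples of two random arrays, so asarrays returns two stacked arrays. It assumes the module-level imports the function relies on (numpy, time, and the cddm print helpers) are in place.

import numpy as np

#a dual-frame source: each item is a tuple of two frames
video = ((np.zeros((4, 4)), np.ones((4, 4))) for _ in range(10))
a, b = asarrays(video, count=10)
print(a.shape, b.shape)   #(10, 4, 4) (10, 4, 4)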
Example 3
def test_print(self):
    #at verbosity 0 none of these calls should print anything
    conf.set_verbose(0)
    pt.print1("No print")
    pt.print2("No print")
    pt.print_progress(4, 10)
    pt.print_progress(10, 10)
    #at verbosity 1 only level-1 messages are printed
    conf.set_verbose(1)
    pt.print1("Ok")
    pt.print2("No print")
    #at verbosity 2 both message levels and the progress bar print
    conf.set_verbose(2)
    pt.print1("OK")
    pt.print2("OK")
    pt.print_progress(4, 10)
    pt.print_progress(10, 10)
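For this excerpt to run outside its test class, the module needs the configuration and print helpers; a plausible pair of imports (assumed from the cddm package layout, not shown in the source) is:

import cddm.conf as conf
import cddm.print_tools as pt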
Example 4
def normalize(data,
              background=None,
              variance=None,
              norm=None,
              mode="corr",
              scale=False,
              mask=None,
              weight=None,
              ret_weight=False,
              out=None):
    """Normalizes correlation (difference) data. Data must be data as returned
    from ccorr or acorr functions. 
    
    Except forthe most basic normalization, background and variance data must be provided.
    Tou can use :func:`stats` to compute background and variance data.
    
    Parameters
    ----------
    data : tuple of ndarrays
        Input data, a length 4 (difference data) or length 5 tuple (correlation data)
    background : (ndarray, ndarray) or ndarray, optional
        Background (mean) of the frame(s) in k-space
    variance : (ndarray, ndarray) or ndarray, optional
        Variance of the frame(s) in k-space
    norm : int, optional
        Normalization type (0: baseline, 1: compensation, 2: bg subtract,
        3: compensation + bg subtract). Input data must support the chosen
        normalization, otherwise an exception is raised. If not given, it is
        chosen based on the input data.
    mode : str, optional
        Representation mode: either "corr" (default) for correlation function,
        or "diff" for image structure function (image difference).
    scale : bool, optional
        If specified, performs scaling so that data is scaled between 0 and 1.
        This works in connection with variance, which must be provided.
    mask : ndarray, optional
        An array of bools indicating which k-values to select. If not
        given, the function computes at every k-value.
    weight : ndarray, optional
        If you wish to specify your own weight for weighted normalization, you 
        must provide it here, otherwise it is computed from the data (default).
    ret_weight : bool, optional
        Whether to return the weight (when calculating weighted normalization).
    out : ndarray, optional
        Output array
        
    Returns
    -------
    out : ndarray
        Normalized data.
    out, weight : ndarray, ndarray
        Normalized data and weight if 'ret_weight' was specified
    """
    print1("Normalizing...")
    #determine what kind of data is there ('diff' or 'corr')
    method = _method_from_data(data)
    scale = _inspect_scale(scale)
    mode = _inspect_mode(mode)

    if isinstance(norm, str):
        norm = norm_from_string(norm)

    #determine default normalization if not specified by user
    norm = _default_norm_from_data(data, method, norm)

    #    if len(data[1].shape) > 1 and norm & NORM_WEIGHTED:
    #        #if multilevel data, subtracted or baseline is best, so force it here.
    #        norm = norm & NORM_SUBTRACTED

    # scale factor for normalization
    if scale:
        _scale_factor = scale_factor(variance, mask)

    print2("   * background : {}".format(background is not None))
    print2("   * variance   : {}".format(variance is not None))
    print2("   * norm       : {}".format(norm))
    print2("   * scale      : {}".format(scale))
    print2("   * mode       : {}".format(mode))
    print2("   * mask       : {}".format(mask is not None))

    bg1, bg2 = _inspect_background(background, norm, mask)

    if (norm & NORM_WEIGHTED == NORM_WEIGHTED):
        level = disable_prints()
        norm_comp = (norm &
                     (NORM_SUBTRACTED | NORM_COMPENSATED)) | NORM_STRUCTURED
        comp_data = normalize(data,
                              background,
                              variance,
                              norm=norm_comp,
                              mode=mode,
                              scale=scale,
                              mask=mask)
        norm_base = (norm &
                     (NORM_SUBTRACTED | NORM_COMPENSATED)) | NORM_STANDARD
        base_data = normalize(data,
                              background,
                              variance,
                              norm=norm_base,
                              mode=mode,
                              scale=scale,
                              mask=mask)

        _multilevel = data[1].ndim > 1

        x_avg, y_avg = _data_estimator(comp_data,
                                       size=8,
                                       n=3,
                                       multilevel=_multilevel)

        _scale_factor = 1. if scale else scale_factor(variance, mask)

        if weight is None:
            if _multilevel:
                shape = np.array(comp_data.shape)
                shape[1:-1] = 1
                _x_interp = np.arange(comp_data.shape[-1])
                x_interp = np.empty(shape=shape, dtype=int)
                for x_level in x_interp:
                    x_level[...] = _x_interp
                    _x_interp *= 2
            else:
                x_interp = np.arange(data[1].shape[-1])

            if norm & NORM_SUBTRACTED:
                delta = noise_delta(variance, mask, scale=scale)
                weight = weight_from_data(y_avg,
                                          delta,
                                          scale_factor=_scale_factor,
                                          mode=mode)
            else:
                if scale:
                    sf = scale_factor(variance, mask)**0.5
                    bg1 = bg1 / sf
                    bg2 = bg2 / sf
                delta = noise_delta(variance, mask, scale=scale)
                weight = weight_prime_from_data(y_avg,
                                                bg1,
                                                bg2,
                                                delta,
                                                scale_factor=_scale_factor,
                                                mode=mode)
            weight = _nb.log_interpolate(x_interp, x_avg, weight)

        enable_prints(level)

        if ret_weight:
            return weighted_sum(comp_data, base_data, weight), weight
        else:
            return weighted_sum(comp_data, base_data, weight)

    count = data[1]

    #add dimensions for broadcasting
    bg1 = bg1[..., None]
    bg2 = bg2[..., None]

    if mask is not None:
        data = take_data(data, mask)

    #dimensions of correlation data (the first element of the data tuple)
    ndim = data[0].ndim

    #need to add dimensions to count for broadcasting
    #for 2D data, this is equivalent to count = count[...,None,None,:]
    ndiff = ndim - count.ndim
    for i in range(ndiff):
        count = np.expand_dims(count, -2)

    if norm == NORM_STANDARD:
        result = _normalize_ccorr_0(data[0], count, bg1, bg2, out=out)
        if mode == "diff":
            result = _corr2diff(result, variance, mask)

    elif norm == NORM_STRUCTURED:
        if method == "corr":
            offset = _variance2offset(variance, mask)
            result = _normalize_ccorr_1(data[0],
                                        count,
                                        bg1,
                                        bg2,
                                        data[2],
                                        out=out)
            #m1 = data[3]
            #m2 = data[4] if data[4] is not None else m1
            #result = _normalize_ccorr_3b(data[0], count, data[2], m1, m2,  out = out)

            result += offset
            if mode == "diff":
                result = _corr2diff(result, variance, mask)

        else:
            d = bg2 - bg1
            result = _normalize_cdiff_1(data[0], count, d, out=out)
            if mode == "corr":
                result = _diff2corr(result, variance, mask)

    elif (norm == NORM_SUBTRACTED | NORM_STANDARD) or (
            norm == NORM_SUBTRACTED | NORM_STANDARD | NORM_COMPENSATED):
        m1 = data[3]
        m2 = data[4] if data[4] is not None else m1
        result = _normalize_ccorr_2(data[0], count, bg1, bg2, m1, m2, out=out)
        if mode == "diff":
            result = _corr2diff(result, variance, mask)

    elif norm == NORM_STANDARD | NORM_COMPENSATED:
        m1 = data[3]
        m2 = data[4] if data[4] is not None else m1
        result = _normalize_ccorr_2b(data[0], count, m1, m2, out=out)
        if mode == "diff":
            result = _corr2diff(result, variance, mask)

    elif (norm == NORM_STRUCTURED | NORM_SUBTRACTED) or (
            norm == NORM_SUBTRACTED | NORM_STRUCTURED | NORM_COMPENSATED):
        if method == "corr":
            offset = _variance2offset(variance, mask)
            m1 = data[3]
            m2 = data[4] if data[4] is not None else m1
            #result = _normalize_ccorr_2b(data[0], count, m1,m2, out = out)
            result = _normalize_ccorr_3(data[0],
                                        count,
                                        bg1,
                                        bg2,
                                        data[2],
                                        m1,
                                        m2,
                                        out=out)
            result += offset
            if mode == "diff":
                result = _corr2diff(result, variance, mask)
        else:
            d = bg2 - bg1
            result = _normalize_cdiff_3(data[0],
                                        count,
                                        d,
                                        data[2],
                                        data[3],
                                        out=out)

            if mode == "corr":
                result = _diff2corr(result, variance, mask)
    elif norm == NORM_STRUCTURED | NORM_COMPENSATED:
        offset = _variance2offset(variance, mask)
        m1 = data[3]
        m2 = data[4] if data[4] is not None else m1
        result = _normalize_ccorr_3b(data[0], count, data[2], m1, m2, out=out)
        result += offset
        if mode == "diff":
            result = _corr2diff(result, variance, mask)
    else:
        raise ValueError("Unknown normalization mode {}".format(norm))

    if scale:
        result /= _scale_factor[..., None]

    if ret_weight:
        return result, weight
    else:
        return result
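A hedged end-to-end sketch of how normalize is typically driven (module paths and the stats/acorr signatures are assumed from the cddm package layout; the random complex array is a stand-in for FFT-transformed video frames):

import numpy as np
from cddm.core import acorr, stats, normalize

#stand-in for a 64-frame video already transformed to k-space
fft_frames = np.random.rand(64, 16, 9) + 1j * np.random.rand(64, 16, 9)
bg, var = stats(fft_frames)   #background and variance in k-space
data = acorr(fft_frames)      #correlation data tuple
result = normalize(data, background=bg, variance=var, mode="corr")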