import numpy as np
from scipy.interpolate import RectBivariateSpline


def get_template_from_img(img, rect):
    ###################  CALCULATING SPLINE AND INTERPOLATING IMAGE WARP #########################
    spline_It = RectBivariateSpline(np.arange(img.shape[0]), np.arange(img.shape[1]), img)

    # np.linspace expects an integer sample count, so round and cast explicitly
    template = spline_It.__call__(np.linspace(rect[1], rect[3], int(np.round(rect[3] - rect[1] + 1)), endpoint=True),
                                  np.linspace(rect[0], rect[2], int(np.round(rect[2] - rect[0] + 1)), endpoint=True), grid=True)
    return template
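
A minimal usage sketch for the function above; the image and rect values here are placeholders (rect is assumed to be [x_top_left, y_top_left, x_bot_right, y_bot_right], matching the convention in the tracking examples further down):

img = np.random.rand(240, 320)          # stand-in for a grayscale frame
rect = [60.5, 40.25, 120.5, 90.25]      # placeholder sub-pixel box
template = get_template_from_img(img, rect)
print(template.shape)                   # (51, 61): one sample per pixel of the box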
Code example #2
File: spline_.py  Project: nmik/Xgam
    def __call__(self, x, y, dx=0, dy=0, grid=False):
        """Overloaded __call__ method.

        Here we basically override the default value of the `grid` parameter
        from `True` to `False`, since we're typically interested in evaluating
        the spline at given physical coordinates, rather than grid points.
        """
        return RectBivariateSpline.__call__(self, x, y, dx, dy, grid)
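
For context, a small self-contained sketch of what the `grid` keyword changes on a plain RectBivariateSpline (not tied to the subclass above):

import numpy as np
from scipy.interpolate import RectBivariateSpline

x = np.arange(5, dtype=float)
y = np.arange(6, dtype=float)
z = np.outer(x, y)                      # z[i, j] = x[i] * y[j]
spl = RectBivariateSpline(x, y, z)

# grid=True (the scipy default) evaluates on the outer product of the inputs -> 2x3 array
print(spl([1.0, 2.0], [0.5, 1.5, 2.5], grid=True).shape)    # (2, 3)
# grid=False evaluates pointwise at paired coordinates -> length-2 vector
print(spl([1.0, 2.0], [0.5, 1.5], grid=False).shape)        # (2,)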
Code example #3
    def __call__(self, x, y, **kwargs):
        if 'grid' not in kwargs:
            x, y = np.meshgrid(x, y)
            kwargs['grid'] = False

            result = RectBivariateSpline.__call__(self, x, y, **kwargs)
            # result = np.where((x < self.xmax) & (x > self.xmin), result, 0.)
            # result[np.isnan(result)] = 0.
            # return result.T
            return np.where(np.isnan(result), 0., result).T
        else:
            result = RectBivariateSpline.__call__(self, x, y, **kwargs)
            # result = np.where((x <= xmax) & (x >= xmin), result, 0.)
            # result[np.isnan(result)] = 0.
            # return result
            return np.where(np.isnan(result), 0., result)
Code example #4
File: gSpline.py  Project: nmik/GRATools
    def __call__(self, x, y, dx=0, dy=0, grid=False):
        """Overloaded __call__ method.

        Here we basically override the default value of the `grid` parameter
        from `True` to `False`, since we're typically interested in evaluating
        the spline at given physical coordinates, rather than grid points.
        """
        # The extra positional argument in the original was the legacy `mth` parameter
        # of older scipy releases; current RectBivariateSpline.__call__ no longer takes it.
        return RectBivariateSpline.__call__(self, x, y, dx, dy, grid)
Code example #5
def sparsery(ops):
    rez, max_proj = get_mov(ops)
    ops['max_proj'] = max_proj
    nframes, Ly, Lx = rez.shape
    ops['Lyc'] = Ly
    ops['Lxc'] = Lx
    sdmov = get_sdmov(rez, ops)
    rez /= sdmov
    #rez *= -1

    rez = neuropil_subtraction(rez, ops['spatial_hp'])

    LL = np.meshgrid(np.arange(Lx), np.arange(Ly))
    Lyp = np.zeros(5, 'int32')
    Lxp = np.zeros(5,'int32')
    gxy = [np.array(LL).astype('float32')]
    dmov = rez
    movu = []

    for j in range(5):
        movu.append(square_conv2(dmov, 3))
        dmov = 2 * downsample(dmov)
        gxy0 = downsample(gxy[j], False)
        gxy.append(gxy0)
        nfr, Lyp[j], Lxp[j] = movu[j].shape
        movu[j] = np.reshape(movu[j], (nfr,-1))

    nfr, Lyc,Lxc = rez.shape
    V0 = []
    ops['Vmap']  = []
    for j in range(len(movu)):
        V0.append(np.amax(movu[j], axis=0))
        #V0.append(np.sum(movu[j]**2 * np.float32(movu[j]>Th2), axis=0)**.5)
        V0[j] = np.reshape(V0[j], (Lyp[j], Lxp[j]))
        ops['Vmap'].append(V0[j].copy())
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1,len(gxy)-1):
        gmodel = RectBivariateSpline(gxy[t][1,:,0], gxy[t][0, 0,:], ops['Vmap'][t],
                                     kx=min(3, gxy[t][1,:,0].size-1), ky=min(3, gxy[t][0,0,:].size-1))
        I[t] = gmodel.__call__(gxy[0][1,:,0], gxy[0][0, 0,:])
    I0 = np.amax(I, axis=0)
    ops['Vcorr'] = I0
    imap = np.argmax(I, axis=0).flatten()
    ipk = np.abs(I0 - maximum_filter(I0, size=(11,11))).flatten() < 1e-4
    isort = np.argsort(I0.flatten()[ipk])[::-1]
    im, nm = mode(imap[ipk][isort[:50]])
    if ops['spatial_scale'] > 0:
        im = max(1, min(4, ops['spatial_scale']))
        fstr = 'FORCED'
    else:
        fstr = 'estimated'

    if im==0:
        print('ERROR: best scale was 0, everything should break now!')
    Th2 = ops['threshold_scaling']*5*max(1,im)
    vmultiplier = max(1, np.float32(rez.shape[0])/1200)
    print('NOTE: %s spatial scale ~%d pixels, time epochs %2.2f, threshold %2.2f '%(fstr, 3*2**im, vmultiplier, vmultiplier*Th2))
    ops['spatscale_pix'] = 3*2**im

    V0 = []
    ops['Vmap']  = []
    for j in range(len(movu)):
        #V0.append(np.amax(movu[j], axis=0))
        #V0.append(np.sum(movu[j]**2 * np.float32(movu[j]>Th2), axis=0)**.5)
        V0.append(threshold_reduce(movu[j], Th2))
        V0[j] = np.reshape(V0[j], (Lyp[j], Lxp[j]))
        ops['Vmap'].append(V0[j].copy())
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1,len(gxy)-1):
        gmodel = RectBivariateSpline(gxy[t][1,:,0], gxy[t][0, 0,:], ops['Vmap'][t],
                                     kx=min(3, gxy[t][1,:,0].size-1), ky=min(3, gxy[t][0,0,:].size-1))
        I[t] = gmodel.__call__(gxy[0][1,:,0], gxy[0][0, 0,:])
    I0 = np.amax(I, axis=0)
    ops['Vcorr'] = I0


    xpix,ypix,lam = [],[],[]
    rez = np.reshape(rez, (-1,Ly*Lx))
    lxs = 3 * 2**np.arange(5)
    nscales = len(lxs)

    niter = 250 * ops['max_iterations']
    Vmax = np.zeros((niter))
    ihop = np.zeros((niter))
    vrat = np.zeros((niter))
    Npix = np.zeros((niter))

    t0 = time.time()

    for tj in range(niter):
        v0max = np.array([np.amax(V0[j]) for j in range(5)])
        imap = np.argmax(v0max)
        imax = np.argmax(V0[imap])
        yi, xi = np.unravel_index(imax, (Lyp[imap], Lxp[imap]))
        yi, xi = gxy[imap][1,yi,xi], gxy[imap][0,yi,xi]

        Vmax[tj] = np.amax(v0max)
        if Vmax[tj] < vmultiplier*Th2:
            break
        ls = lxs[imap]

        ihop[tj] = imap

        ypix0, xpix0, lam0 = add_square(int(yi),int(xi),ls,Ly,Lx)
        xproj = rez[:, ypix0*Lx+ xpix0] @ lam0
        goodframe = np.nonzero(xproj>Th2)[0]
        for j in range(3):
            ypix0, xpix0, lam0 = iter_extend(ypix0, xpix0, rez, Ly,Lx, goodframe)
            xproj = rez[:, ypix0*Lx+ xpix0] @ lam0
            goodframe = np.nonzero(xproj>Th2)[0]
            if len(goodframe)<1:
                break
        if len(goodframe)<1:
            break
        vrat[tj], ipack = two_comps(rez[:, ypix0*Lx+ xpix0], lam0, Th2)
        if vrat[tj]>1.25:
            lam0, xp, goodframe = ipack
            xproj[goodframe] = xp
            ix = lam0>lam0.max()/5
            xpix0 = xpix0[ix]
            ypix0 = ypix0[ix]
            lam0 = lam0[ix]
        # update residual on raw movie
        rez[np.ix_(goodframe, ypix0*Lx+ xpix0)] -= xproj[goodframe][:,np.newaxis] * lam0
        # update filtered movie
        ys, xs, lms = multiscale_mask(ypix0,xpix0,lam0, Lyp, Lxp)
        for j in range(nscales):
            movu[j][np.ix_(goodframe,xs[j]+Lxp[j]*ys[j])] -= np.outer(xproj[goodframe], lms[j])
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.amax(movu[j][:,xs[j]+Lxp[j]*ys[j]], axis=0)
            Mx = movu[j][:,xs[j]+Lxp[j]*ys[j]]
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.sum(Mx**2 * np.float32(Mx>Th2), axis=0)**.5
            V0[j][ys[j], xs[j]] = np.sum(Mx**2 * np.float32(Mx>Th2), axis=0)**.5
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.sum(movu[j][:,xs[j]+Lxp[j]*ys[j]]**2 * np.float32(movu[j][:,xs[j]+Lxp[j]*ys[j]]>Th2), axis=0)**.5

        xpix.append(xpix0)
        ypix.append(ypix0)
        lam.append(lam0)
        if tj%1000==0:
            print('%d ROIs, score=%2.2f'%(tj, Vmax[tj]))
    #print(tj, time.time()-t0, Vmax[tj])
    ops['Vmax'] = Vmax
    ops['ihop'] = ihop
    ops['Vsplit'] = vrat
    stat  = [{'ypix':ypix[n], 'lam':lam[n]*sdmov[ypix[n], xpix[n]], 'xpix':xpix[n]} for n in range(len(xpix))]

    stat = get_stat(ops, stat)
    return ops,stat
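
The RectBivariateSpline calls inside sparsery resample each downsampled correlation map back onto the full-resolution pixel grid before taking the maximum over scales; a minimal standalone sketch of that step (array sizes here are placeholders):

import numpy as np
from scipy.interpolate import RectBivariateSpline

Ly, Lx = 128, 160                                   # hypothetical cropped movie size
yfull = np.arange(Ly, dtype=np.float32)
xfull = np.arange(Lx, dtype=np.float32)

# pretend Vmap at a coarser scale: every 2nd pixel in y and x
ycoarse, xcoarse = yfull[::2], xfull[::2]
Vmap_coarse = np.random.rand(ycoarse.size, xcoarse.size).astype(np.float32)

gmodel = RectBivariateSpline(ycoarse, xcoarse, Vmap_coarse,
                             kx=min(3, ycoarse.size - 1), ky=min(3, xcoarse.size - 1))
Vmap_full = gmodel(yfull, xfull)                    # (Ly, Lx): coarse map interpolated to full resolution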
Code example #6
File: bounded_splines.py  Project: llimeht/refnx
    def __call__(self, x, y):
        outside = self.is_outside_domain(x, y)

        return np.where(outside, self.fill_value,
                                 RectBivariateSpline.__call__(self, x, y))
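
The surrounding class is not shown in the snippet. A hedged reconstruction of how such a bounded spline could be assembled (the class name, is_outside_domain, and fill_value handling are assumptions for illustration, not refnx's actual code):

import numpy as np
from scipy.interpolate import RectBivariateSpline


class BoundedRectBivariateSpline(RectBivariateSpline):
    """Illustrative sketch: return fill_value outside the fitted domain."""

    def __init__(self, x, y, z, fill_value=0.0, **kwargs):
        super().__init__(x, y, z, **kwargs)
        self.fill_value = fill_value
        self.xmin, self.xmax = np.min(x), np.max(x)
        self.ymin, self.ymax = np.min(y), np.max(y)

    def is_outside_domain(self, x, y):
        x, y = np.atleast_1d(x), np.atleast_1d(y)
        x_out = (x < self.xmin) | (x > self.xmax)
        y_out = (y < self.ymin) | (y > self.ymax)
        # broadcast to the (len(x), len(y)) grid that grid=True evaluation returns
        return x_out[:, None] | y_out[None, :]

    def __call__(self, x, y):
        outside = self.is_outside_domain(x, y)
        return np.where(outside, self.fill_value,
                        RectBivariateSpline.__call__(self, x, y))

Constructed as BoundedRectBivariateSpline(x_knots, y_knots, z, fill_value=np.nan), such a spline would return NaN for any query outside the data rectangle instead of extrapolating.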
Code example #7
def sparsery(ops):
    """ bin ops['reg_file'] then detect ROIs using correlations in time
    
    Parameters
    ----------------

    ops : dictionary
        'reg_file', 'Ly', 'Lx', 'yrange', 'xrange', 'tau', 'fs', 'nframes', 'high_pass', 'batch_size'


    Returns
    ----------------

    ops : dictionary
        adds 'max_proj', 'Vcorr', 'Vmap', 'Vsplit'
    
    stat : array of dicts
        list of ROIs

    """
    rez, max_proj = bin_movie(ops)
    ops['max_proj'] = max_proj
    nbinned, Lyc, Lxc = rez.shape
    # cropped size
    ops['Lyc'] = Lyc
    ops['Lxc'] = Lxc
    sdmov = get_sdmov(rez, ops)
    rez /= sdmov

    # subtract low-pass filtered version of binned movie
    rez = neuropil_subtraction(rez, ops['spatial_hp'])

    LL = np.meshgrid(np.arange(Lxc), np.arange(Lyc))
    gxy = [np.array(LL).astype('float32')]
    dmov = rez
    movu = []

    # downsample movie at various spatial scales
    # downsampled sizes
    Lyp = np.zeros(5, 'int32')
    Lxp = np.zeros(5, 'int32')
    for j in range(5):
        # convolve
        movu.append(square_conv2(dmov, 3))
        # downsample
        dmov = 2 * downsample(dmov)
        gxy0 = downsample(gxy[j], False)
        gxy.append(gxy0)
        nbinned, Lyp[j], Lxp[j] = movu[j].shape

    # find maximum spatial scale for each pixel
    V0 = []
    ops['Vmap'] = []
    for j in range(len(movu)):
        V0.append(movu[j].max(axis=0))
        ops['Vmap'].append(V0[j].copy())
    # spline over scales
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1, len(gxy) - 1):
        gmodel = RectBivariateSpline(gxy[t][1, :, 0],
                                     gxy[t][0, 0, :],
                                     ops['Vmap'][t],
                                     kx=min(3, gxy[t][1, :, 0].size - 1),
                                     ky=min(3, gxy[t][0, 0, :].size - 1))
        I[t] = gmodel.__call__(gxy[0][1, :, 0], gxy[0][0, 0, :])
    I0 = I.max(axis=0)
    ops['Vcorr'] = I0

    # find best scale based on scale of top peaks
    # (used  to set threshold)
    imap = np.argmax(I, axis=0).flatten()
    ipk = np.abs(I0 - maximum_filter(I0, size=(11, 11))).flatten() < 1e-4
    isort = np.argsort(I0.flatten()[ipk])[::-1]
    im, nm = mode(imap[ipk][isort[:50]])
    if ops['spatial_scale'] > 0:
        im = max(1, min(4, ops['spatial_scale']))
        fstr = 'FORCED'
    else:
        fstr = 'estimated'
    if im == 0:
        print('ERROR: best scale was 0, everything should break now!')

    # threshold for accepted peaks (scale it by spatial scale)
    Th2 = ops['threshold_scaling'] * 5 * max(1, im)
    vmultiplier = max(1, np.float32(rez.shape[0]) / 1200)
    print(
        'NOTE: %s spatial scale ~%d pixels, time epochs %2.2f, threshold %2.2f '
        % (fstr, 3 * 2**im, vmultiplier, vmultiplier * Th2))
    ops['spatscale_pix'] = 3 * 2**im

    V0 = []
    ops['Vmap'] = []
    # get standard deviation for pixels for all values > Th2
    for j in range(len(movu)):
        V0.append(threshold_reduce(movu[j], Th2))
        ops['Vmap'].append(V0[j].copy())
        movu[j] = np.reshape(movu[j], (movu[j].shape[0], -1))

    xpix, ypix, lam = [], [], []
    rez = np.reshape(rez, (-1, Lyc * Lxc))
    lxs = 3 * 2**np.arange(5)
    nscales = len(lxs)

    niter = 250 * ops['max_iterations']
    Vmax = np.zeros((niter))
    ihop = np.zeros((niter))
    vrat = np.zeros((niter))
    Npix = np.zeros((niter))

    t0 = time.time()

    for tj in range(niter):
        # find peaks in stddev's
        v0max = np.array([V0[j].max() for j in range(5)])
        imap = np.argmax(v0max)
        imax = np.argmax(V0[imap])
        yi, xi = np.unravel_index(imax, (Lyp[imap], Lxp[imap]))
        # position of peak
        yi, xi = gxy[imap][1, yi, xi], gxy[imap][0, yi, xi]

        # check if peak is larger than threshold * max(1,nbinned/1200)
        Vmax[tj] = v0max.max()
        if Vmax[tj] < vmultiplier * Th2:
            break
        ls = lxs[imap]

        ihop[tj] = imap

        # make square of initial pixels based on spatial scale of peak
        ypix0, xpix0, lam0 = add_square(int(yi), int(xi), ls, Lyc, Lxc)

        # project movie into square to get time series
        tproj = rez[:, ypix0 * Lxc + xpix0] @ lam0
        goodframe = np.nonzero(tproj > Th2)[0]  # frames with activity > Th2

        # extend mask based on activity similarity
        for j in range(3):
            ypix0, xpix0, lam0 = iter_extend(ypix0, xpix0, rez[goodframe], Lyc,
                                             Lxc)
            tproj = rez[:, ypix0 * Lxc + xpix0] @ lam0
            goodframe = np.nonzero(tproj > Th2)[0]
            if len(goodframe) < 1:
                break
        if len(goodframe) < 1:
            break

        # check if ROI should be split
        vrat[tj], ipack = two_comps(rez[:, ypix0 * Lxc + xpix0], lam0, Th2)
        if vrat[tj] > 1.25:
            lam0, xp, goodframe = ipack
            tproj[goodframe] = xp
            ix = lam0 > lam0.max() / 5
            xpix0 = xpix0[ix]
            ypix0 = ypix0[ix]
            lam0 = lam0[ix]

        # update residual on raw movie
        rez[np.ix_(goodframe, ypix0 * Lxc +
                   xpix0)] -= tproj[goodframe][:, np.newaxis] * lam0
        # update filtered movie
        ys, xs, lms = multiscale_mask(ypix0, xpix0, lam0, Lyp, Lxp)
        for j in range(nscales):
            movu[j][np.ix_(goodframe, xs[j] + Lxp[j] * ys[j])] -= np.outer(
                tproj[goodframe], lms[j])
            Mx = movu[j][:, xs[j] + Lxp[j] * ys[j]]
            V0[j][ys[j],
                  xs[j]] = (Mx**2 * np.float32(Mx > Th2)).sum(axis=0)**.5

        xpix.append(xpix0)
        ypix.append(ypix0)
        lam.append(lam0)
        if tj % 1000 == 0:
            print('%d ROIs, score=%2.2f' % (tj, Vmax[tj]))

    ops['Vmax'] = Vmax
    ops['ihop'] = ihop
    ops['Vsplit'] = vrat
    stat = [{
        'ypix': ypix[n] + ops['yrange'][0],
        'lam': lam[n] * sdmov[ypix[n], xpix[n]],
        'xpix': xpix[n] + ops['xrange'][0],
        'footprint': ops['ihop'][n]
    } for n in range(len(xpix))]

    return ops, stat
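
A hypothetical call sketch for the function above; every value in the ops dictionary is a placeholder, and the real suite2p settings dictionary carries more fields than the docstring lists:

ops = {
    'reg_file': 'data.bin',                 # placeholder path to the registered binary movie
    'Ly': 512, 'Lx': 512,
    'yrange': [0, 512], 'xrange': [0, 512],
    'tau': 1.0, 'fs': 30.0, 'nframes': 5000,
    'high_pass': 100, 'batch_size': 500,
    'spatial_hp': 25, 'spatial_scale': 0,   # 0 lets sparsery estimate the scale itself
    'threshold_scaling': 1.0, 'max_iterations': 20,
}
ops, stat = sparsery(ops)
print('%d ROIs detected' % len(stat))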
Code example #8
def LucasKanadeBasis(It, It1, rect, bases, p0=np.zeros(2)):
    # Input:
    #	It: template image
    #	It1: Current image
    #	rect: Current position of the car
    #	(top left, bot right coordinates)
    #	bases: [n, m, k] where nxm is the size of the template.
    # Output:
    #	p: movement vector [dp_x, dp_y]

    # Put your implementation here

    p = p0.astype(np.float64)  # copy so the mutable default p0 is never modified in place

    # np.linspace below needs an integer sample count
    width_patch = int(np.round(rect[2] - rect[0] + 1))
    height_patch = int(np.round(rect[3] - rect[1] + 1))

    # gradX = cv2.Sobel(It1, -1, 1, 0, borderType=cv2.BORDER_CONSTANT)
    # gradY = cv2.Sobel(It1, -1, 0, 1, borderType=cv2.BORDER_CONSTANT)
    gradY, gradX = np.gradient(It1)

    ###################################  SPLINES  ########################################
    spline_It1 = RectBivariateSpline(np.arange(It1.shape[0]),
                                     np.arange(It1.shape[1]), It1)
    spline_It1_X_grad = RectBivariateSpline(np.arange(It1.shape[0]),
                                            np.arange(It1.shape[1]), gradX)
    spline_It1_Y_grad = RectBivariateSpline(np.arange(It1.shape[0]),
                                            np.arange(It1.shape[1]), gradY)

    threshold = 1e-1
    p_delta_norm = np.inf

    while p_delta_norm > threshold:
        # rect is assumed array-like; np.asarray makes the shift element-wise
        warp = np.asarray(rect, dtype=np.float64) + [p[0], p[1], p[0], p[1]]

        jacobian = np.asarray([[1, 0], [0, 1]])

        ################################### INTERPOLATING IT WARP ##################################
        It1_patch = spline_It1.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))

        #####################################  TEMPLATE ERROR  #####################################
        b = It - It1_patch
        # print('b flatten shape: ', b.shape)

        b_vect = b.reshape(b.shape[0] * b.shape[1], 1)

        ################################### INTERPOLATING GRAD X and Y WARP ################################
        gradX_It1_patch = spline_It1_X_grad.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))
        gradY_It1_patch = spline_It1_Y_grad.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))

        gradX_It1_patch_vect = gradX_It1_patch.reshape(
            gradX_It1_patch.shape[0] * gradX_It1_patch.shape[1], 1)
        gradY_It1_patch_vect = gradY_It1_patch.reshape(
            gradY_It1_patch.shape[0] * gradY_It1_patch.shape[1], 1)

        ################################## CALCULATING B, WEIGHTS, BW ################################
        B = np.zeros((bases.shape[0] * bases.shape[1], bases.shape[2]))
        for i in range(bases.shape[2]):
            B[:, i] = bases[:, :, i].flatten()

        # error_vect = It1_patch - It
        # weights2 = B.T.dot(error_vect.reshape( error_vect.shape[0]*error_vect.shape[1] , 1))
        # print('weights1 : ', weights1)

        # weights = []
        # Bw_vect = np.zeros(   It.shape[0]* It.shape[1] )
        # for i in range(bases.shape[2]):
        #     base_vect = bases[:, :, i].reshape( bases.shape[0] * bases.shape[1])
        #     weight = np.dot(base_vect, -b_vect)
        #     weights.append(weight)
        #     Bw_vect += weight * base_vect
        # print('weights2 : ', weights2)

        ################################### STACKING GRAD X and Y WARP ################################
        grad_It1_patch = np.hstack(
            (gradX_It1_patch_vect, gradY_It1_patch_vect))
        # print('grad_It1_patch shape: ', grad_It1_patch.shape)

        A = np.matmul(grad_It1_patch, jacobian)
        # print('A: ', A)
        # print('A shape: ', A.shape)

        ################################ UPDATE FOR APPEARANCE CHANGES #############################

        # Bw_vect = np.expand_dims(Bw_vect, 0)
        # print('Bw: ', Bw_vect)
        # print('Bw shape: ', Bw_vect.shape)

        # b_vect = b_vect + Bw_vect

        # gradX_It1_patch_vect = gradX_It1_patch_vect - Bw_vect.T
        # gradY_It1_patch_vect = gradY_It1_patch_vect - Bw_vect.T

        # print('b_vect shape: ', b_vect.shape)
        # print('gradX_It1_patch_vect shape: ', gradX_It1_patch_vect.shape)

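        # (I - B B^T) projects the error and the Jacobian onto the orthogonal
        # complement of the appearance basis (assuming the columns of B are
        # orthonormal, e.g. PCA appearance bases)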
        span_term = np.eye(B.shape[0], B.shape[0]) - np.matmul(B, B.T)
        # print('span_term: ', span_term)
        # print('span_term shape: ', span_term.shape)

        b_vect_new = np.matmul(span_term, b_vect)

        A_new = np.matmul(span_term, A)

        hessian = np.matmul(A_new.T, A_new)
        # print('Hessian shape: ', hessian.shape)

        p_delta = np.matmul(np.linalg.inv(hessian),
                            np.matmul(A_new.T, b_vect_new))
        p_delta = p_delta.flatten()
        # print('p_delta shape: ',p_delta.shape)

        p += p_delta

        p_delta_norm = np.sqrt(p_delta[0]**2 + p_delta[1]**2)
        print('image error: ', b.sum(), ', p_delta: ', p_delta, ', norm: ',
              p_delta_norm)

    return p
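
A small numeric check of the projection used above; the orthonormal basis here is synthetic, purely for illustration:

import numpy as np

rng = np.random.default_rng(0)
n_pix, n_bases = 100, 5

# synthetic orthonormal appearance basis (columns), standing in for `bases` flattened into B
B, _ = np.linalg.qr(rng.standard_normal((n_pix, n_bases)))
span_term = np.eye(n_pix) - B @ B.T

b_vect = rng.standard_normal((n_pix, 1))
b_new = span_term @ b_vect

# the projected error has (numerically) zero component along the basis directions
print(np.allclose(B.T @ b_new, 0.0))    # True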
Code example #9
def LucasKanade(It, It1, rect, p0=np.zeros(2)):
    # Input:
    #	It: template image
    #	It1: Current image
    #	rect: Current position of the car
    #	(top left, bot right coordinates)
    #	p0: Initial movement vector [dp_x0, dp_y0]
    # Output:
    #	p: movement vector [dp_x, dp_y]

    # Put your implementation here

    # pad_width = int(It.shape[1]/2)
    # It = np.pad(It, pad_width, mode='symmetric')
    # It1 = np.pad(It1, pad_width, mode='symmetric')

    p = p0.astype(np.float64)  # copy so the mutable default p0 is never modified in place

    # np.linspace below needs an integer sample count
    width_patch = int(np.round(rect[2] - rect[0] + 1))
    height_patch = int(np.round(rect[3] - rect[1] + 1))

    gradX = cv2.Sobel(It1, -1, 1, 0, borderType=cv2.BORDER_CONSTANT)
    gradY = cv2.Sobel(It1, -1, 0, 1, borderType=cv2.BORDER_CONSTANT)
    # gradX, gradY = np.gradient(It1)

    ###################################  SPLINES  ########################################
    spline_It1 = RectBivariateSpline(np.arange(It1.shape[0]),
                                     np.arange(It1.shape[1]), It1)
    spline_It1_X_grad = RectBivariateSpline(np.arange(It1.shape[0]),
                                            np.arange(It1.shape[1]), gradX)
    spline_It1_Y_grad = RectBivariateSpline(np.arange(It1.shape[0]),
                                            np.arange(It1.shape[1]), gradY)

    threshold = 1.5e-3
    p_delta_norm = np.inf

    while p_delta_norm > threshold:
        # rect is assumed array-like; np.asarray makes the shift element-wise
        warp = np.asarray(rect, dtype=np.float64) + [p[0], p[1], p[0], p[1]]

        jacobian = np.asarray([[1, 0], [0, 1]])

        ################################### INTERPOLATING IT WARP ##################################
        It1_patch = spline_It1.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))

        #####################################  TEMPLATE ERROR  #####################################
        b = It - It1_patch
        b_vect = b.reshape(1, b.shape[0] * b.shape[1])
        # print('b flatten shape: ', b.shape)

        ################################### INTERPOLATING GRAD X and Y WARP ################################
        gradX_It1_patch = spline_It1_X_grad.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))
        gradY_It1_patch = spline_It1_Y_grad.__call__(
            np.linspace(warp[1], warp[3], height_patch, endpoint=True),
            np.linspace(warp[0], warp[2], width_patch, endpoint=True))

        gradX_It1_patch_vect = gradX_It1_patch.reshape(
            gradX_It1_patch.shape[0] * gradX_It1_patch.shape[1], 1)
        gradY_It1_patch_vect = gradY_It1_patch.reshape(
            gradY_It1_patch.shape[0] * gradY_It1_patch.shape[1], 1)

        ################################### STACKING GRAD X and Y WARP ################################
        grad_It1_patch = np.hstack(
            (gradX_It1_patch_vect, gradY_It1_patch_vect))
        # print('grad_It1_patch shape: ', grad_It1_patch.shape)

        A = np.matmul(grad_It1_patch, jacobian)
        # print('A: ', A)
        # print('A shape: ', A.shape)

        hessian = np.matmul(A.T, A)
        # print('Hessian shape: ', hessian.shape)

        p_delta = np.matmul(np.linalg.inv(hessian), np.matmul(A.T, b_vect.T))
        p_delta = p_delta.flatten()
        # print('p_delta shape: ',p_delta.shape)

        p += p_delta

        p_delta_norm = np.sqrt(p_delta[0]**2 + p_delta[1]**2)
        print('image error: ', b.sum(), ', p_delta: ', p_delta, ', norm: ',
              p_delta_norm)

    return p
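
A hedged usage sketch for frame-to-frame tracking; load_grayscale_frames and the rect values are hypothetical placeholders, not part of the original assignment code:

import numpy as np

frames = load_grayscale_frames('frames.npy')   # hypothetical loader returning a (T, H, W) float array
rect = np.array([59.0, 116.0, 145.0, 151.0])   # placeholder [x1, y1, x2, y2] box

rects = [rect.copy()]
for t in range(frames.shape[0] - 1):
    p = LucasKanade(frames[t], frames[t + 1], rect)
    rect = rect + np.array([p[0], p[1], p[0], p[1]])   # shift the box by the recovered motion
    rects.append(rect.copy())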
Code example #10
    def __call__(self, x, y):
        outside = self.is_outside_domain(x, y)

        return np.where(outside, self.fill_value,
                        RectBivariateSpline.__call__(self, x, y))
Code example #11
def Interp2DScalar(list_rLs,
                   list_keys,
                   xs,
                   ys,
                   file_input,
                   do_clip=False,
                   clip_bounds=np.array([0.0,1.0]),
                   k_spline=1,
                   transforms=None):
    """
    Interpolation of scalar data on a refined mesh onto a continuous domain.

    Args:
    list_rLs - list of refinement levels for interpolation
    list_keys - list of HDF5 keys
    xs - output x coordinates
    ys - output y coordinates
    file_input - HDF5 file containing keys

    Kwargs:
    do_clip=False - bool - clip data outside clip_bounds
    clip_bounds=np.array([0.0,1.0]) - array(float) - bounds for clipping of data
    k_spline=1 - int - degree of spline representation
    transforms=None - list(str) - transformations to apply to data after interpolation

    Returns:
    array(float) - interpolated and possibly transformed data
    """
    output = np.zeros((len(xs),len(ys)))
    for rL in list_rLs:
        keys_rL = GetKeysForRefinement_Level(rL,list_keys)
        datasets_current = []
        for key in keys_rL:
            datasets_current.append(file_input[key])
        delta = datasets_current[0].attrs['delta'][0]
        for dataset in datasets_current:
            extent_current = GetDatasetExtent2D(dataset)
            extent_current_IG = GetDatasetExtentIncGhost2D(dataset)
            xs_domain = np.arange(extent_current_IG[0],extent_current_IG[1]+0.1*delta,delta)
            ys_domain = np.arange(extent_current_IG[2],extent_current_IG[3]+0.1*delta,delta)
            interp_spline = RectBivariateSpline(xs_domain,ys_domain,np.asarray(dataset).T,kx=k_spline,ky=k_spline)
            xs_mask = np.logical_and(extent_current[0]<=xs,xs<=extent_current[1])
            ys_mask = np.logical_and(extent_current[2]<=ys,ys<=extent_current[3])
            xs_current = np.extract(xs_mask,xs)
            ys_current = np.extract(ys_mask,ys)
            shift = np.array([np.argmax(xs_mask),np.argmax(ys_mask)])
            data_interp = interp_spline.__call__(xs_current,ys_current)
            np.copyto(output[shift[0]:shift[0]+len(xs_current),
                             shift[1]:shift[1]+len(ys_current)],
                             data_interp)
    if do_clip:
        np.clip(output,clip_bounds[0],clip_bounds[1],out=output)
    
    if transforms:
        for i in range(len(transforms)):
            if transforms[i] == "rotate_right_half":
                output = RotateAndCopyY(output)
            #elif transforms[i] == "reflect_y":
            #    output = ReflectY(output)
            else:
                raise ValueError("Unknown tranformation requested")
            
    return output
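
The per-patch logic above (build a spline on the patch's own grid, evaluate it only at the global output coordinates the patch covers, then copy into the output with an index shift) can be sketched standalone as follows; the patch extent and data are made up:

import numpy as np
from scipy.interpolate import RectBivariateSpline

# global output coordinates
xs = np.linspace(0.0, 10.0, 201)
ys = np.linspace(0.0, 5.0, 101)
output = np.zeros((len(xs), len(ys)))

# one hypothetical patch covering x in [2, 6], y in [1, 3] with spacing 0.25
delta = 0.25
xs_domain = np.arange(2.0, 6.0 + 0.1 * delta, delta)
ys_domain = np.arange(1.0, 3.0 + 0.1 * delta, delta)
data = np.sin(xs_domain)[:, None] * np.cos(ys_domain)[None, :]

interp_spline = RectBivariateSpline(xs_domain, ys_domain, data, kx=1, ky=1)

xs_mask = (2.0 <= xs) & (xs <= 6.0)
ys_mask = (1.0 <= ys) & (ys <= 3.0)
xs_current = xs[xs_mask]
ys_current = ys[ys_mask]
shift = np.array([np.argmax(xs_mask), np.argmax(ys_mask)])

output[shift[0]:shift[0] + len(xs_current),
       shift[1]:shift[1] + len(ys_current)] = interp_spline(xs_current, ys_current)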
Code example #12
def Interp2DSpeedDirection(list_rLs,
                           list_keys_velx,
                           list_keys_vely,
                           xs,
                           ys,
                           file_input_velx,
                           file_input_vely,
                           do_clip=[False,False],
                           clip_bounds=np.array([[0.0,1.0],[-np.pi,np.pi]]),
                           k_spline=1,
                           transforms=None):
    """
    Interpolation of velocity data on a refined mesh onto a continuous domain, returning velocity magnitude and direction.

    Args:
    list_rLs - list of refinement levels for interpolation
    list_keys_velx - list of HDF5 keys for x component of velocity
    list_keys_vely - list of HDF5 keys for y component of velocity
    xs - output x coordinates
    ys - output y coordinates
    file_input_velx - HDF5 file containing keys for x component of velocity
    file_input_vely - HDF5 file containing keys for y component of velocity

    Kwargs:
    do_clip=[False,False] - list(bool) - clip data outside clip_bounds for speed and/or direction respectively
    clip_bounds=np.array([[0.0,1.0],[-np.pi,np.pi]]) - array(float) - bounds for clipping of data for speed and/or direction respectively
    k_spline=1 - int - degree of spline representation
    transforms=None - list(str) - transformations to apply to data after interpolation

    Returns:
    array(float) - interpolated and possibly transformed speed data
    array(float) - interpolated and possibly transformed direction data
    """
    output_speed = np.zeros((len(xs),len(ys)))
    output_direc = np.zeros((len(xs),len(ys)))
    
    for rL in list_rLs:
        keys_rL_velx = GetKeysForRefinement_Level(rL,list_keys_velx)
        keys_rL_vely = GetKeysForRefinement_Level(rL,list_keys_vely)
        datasets_current_velx = []
        datasets_current_vely = []
        for key in keys_rL_velx:
            datasets_current_velx.append(file_input_velx[key])
        for key in keys_rL_vely:
            datasets_current_vely.append(file_input_vely[key])
        delta = datasets_current_velx[0].attrs['delta'][0]
        for ds_idx in range(len(datasets_current_velx)):
            dataset_velx = datasets_current_velx[ds_idx]
            dataset_vely = datasets_current_vely[ds_idx]
            extent_current = GetDatasetExtent2D(dataset_velx)
            extent_current_IG = GetDatasetExtentIncGhost2D(dataset_velx)
            xs_domain = np.arange(extent_current_IG[0],extent_current_IG[1]+0.1*delta,delta)
            ys_domain = np.arange(extent_current_IG[2],extent_current_IG[3]+0.1*delta,delta)
            interp_spline_velx = RectBivariateSpline(xs_domain,ys_domain,np.asarray(dataset_velx).T,kx=k_spline,ky=k_spline)
            interp_spline_vely = RectBivariateSpline(xs_domain,ys_domain,np.asarray(dataset_vely).T,kx=k_spline,ky=k_spline)
            xs_mask = np.logical_and(extent_current[0]<=xs,xs<=extent_current[1])
            ys_mask = np.logical_and(extent_current[2]<=ys,ys<=extent_current[3])
            xs_current = np.extract(xs_mask,xs)
            ys_current = np.extract(ys_mask,ys)
            shift = np.array([np.argmax(xs_mask),np.argmax(ys_mask)])
            data_interp_velx = interp_spline_velx.__call__(xs_current,ys_current)
            data_interp_vely = interp_spline_vely.__call__(xs_current,ys_current)
            data_interp_speed = np.sqrt(data_interp_velx**2 + data_interp_vely**2)
            data_interp_direc = np.arctan2(data_interp_vely,data_interp_velx)
            np.copyto(output_speed[shift[0]:shift[0]+len(xs_current),
                                   shift[1]:shift[1]+len(ys_current)],
                                   data_interp_speed)
            np.copyto(output_direc[shift[0]:shift[0]+len(xs_current),
                                   shift[1]:shift[1]+len(ys_current)],
                                   data_interp_direc)
            
    if do_clip[0]:
        np.clip(output_speed,clip_bounds[0,0],clip_bounds[0,1],out=output_speed)
    if do_clip[1]:
        np.clip(output_direc,clip_bounds[1,0],clip_bounds[1,1],out=output_direc)
        
    if transforms:
        for i in range(len(transforms)):
            if transforms[i] == "rotate_right_half":
                output_speed = RotateAndCopyY(output_speed)
                output_direc = RotateAndCopyY_angle(output_direc)
            #elif transforms[i] == "reflect_y":
            #    output_speed = ReflectY(output_speed)
            else:
                raise ValueError("Unknown tranformation requested")
    
    return output_speed, output_direc
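
The speed/direction conversion at the heart of the function, isolated (values are arbitrary):

import numpy as np

velx = np.array([[1.0, 0.0], [-1.0, 0.5]])
vely = np.array([[0.0, 2.0], [-1.0, -0.5]])

speed = np.sqrt(velx**2 + vely**2)    # velocity magnitude
direc = np.arctan2(vely, velx)        # direction angle in (-pi, pi]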