Example #1
def derivatives(Image1,Image2,u,v,h,b):
    N,M=Image1.shape
    y=np.linspace(0,N-1,N)
    x=np.linspace(0,M-1,M)
    x,y=np.meshgrid(x,y)
    Ix=np.zeros((N,M))
    Iy=np.zeros((N,M))
    x=x+u; y=y+v
    WImage,I2x,I2y=warp_image2(Image2,x,y,h)  # derivatives of the second (warped) image

    It= WImage-Image1 # temporal derivative

    Ix=filter2(Image1, h) # spatial derivatives of the first image
    Iy=filter2(Image1, h.T)
    Ix  = b*I2x+(1-b)*Ix           # average the derivatives of both images
    Iy  = b*I2y+(1-b)*Iy


    It=np.nan_to_num(It) # remove NaN values from the derivatives
    Ix=np.nan_to_num(Ix)
    Iy=np.nan_to_num(Iy)
    out_bound= np.where((y > N-1) | (y<0) | (x> M-1) | (x<0))
    Ix[out_bound]=0 # zero the derivatives at out-of-bounds pixels
    Iy[out_bound]=0
    It[out_bound]=0
    return [Ix,Iy,It]
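These snippets rely on helpers that are not shown here: warp_image2 is defined in Example #9 below, and filter2 behaves like a 2-D convolution with an optional boundary mode. A minimal sketch of the assumed filter2 import and a plausible derivative kernel h, for running the snippets standalone (both are assumptions, not taken from the original source):

# Assumed helpers for running the snippets standalone (a guess, not the original imports).
import numpy as np
from scipy.ndimage import convolve as filter2  # filter2(image, kernel, mode=...)

# A plausible spatial-derivative kernel to pass as `h` (central difference along x).
h = np.array([[0.0, 0.0, 0.0],
              [-0.5, 0.0, 0.5],
              [0.0, 0.0, 0.0]])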
Example #2
def HornSchunck(im1, im2, alpha=0.001, Niter=8, verbose=False):
    """
    im1: image at t=0
    im2: image at t=1
    alpha: regularization constant
    Niter: number of iterations
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    #set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    # Iteration to reduce error
    for _ in range(Niter):
        #%% Compute local averages of the flow vectors
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)
        #%% common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        #%% iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    M = np.sqrt(U**2 + V**2)

    return U, V, M
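For reference, the loop above is the classical Horn–Schunck fixed-point iteration, with \bar{u}, \bar{v} the neighbourhood averages produced by filter2(U, HSKERN):

u^{k+1} = \bar{u}^{k} - \frac{f_x\,(f_x\bar{u}^{k} + f_y\bar{v}^{k} + f_t)}{\alpha^{2} + f_x^{2} + f_y^{2}},
\qquad
v^{k+1} = \bar{v}^{k} - \frac{f_y\,(f_x\bar{u}^{k} + f_y\bar{v}^{k} + f_t)}{\alpha^{2} + f_x^{2} + f_y^{2}}

and M = \sqrt{U^{2} + V^{2}} is simply the flow magnitude.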
def generate_invmatrix(im, h, dx):
    D = np.array([[0, -1, 0], [0, 0, 0], [0, 1, 0]]) / 2
    # partial derivative
    M = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) / 4
    # mixed partial derivatives
    F = np.array([[0, 0, 0], [0, 1, 1], [0, 1, 1]]) / 4
    # average
    D2 = np.array([[0, 1, 0], [0, -2, 0], [0, 1, 0]])
    # partial derivative
    H = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])

    r, c = im.shape

    h = np.double(h)

    cmtx = filter2(np.ones(im.shape), H / (dx * dx))

    A11 = im * (filter2(im, D2 / (dx * dx), mode='nearest') - 2 * im /
                (dx * dx)) - h * cmtx
    A22 = im * (filter2(im, D2.transpose() /
                        (dx * dx), mode='nearest') - 2 * im /
                (h * h)) - h * cmtx
    A12 = im * filter2(im, M / (dx * dx), mode='nearest')

    DetA = A11 * A22 - A12 * A12

    B11 = A22 / DetA
    B12 = -A12 / DetA
    B22 = A11 / DetA

    return B11, B12, B22
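The final statements are just the closed-form inverse of the symmetric 2×2 matrix assembled per pixel:

\begin{pmatrix} B_{11} & B_{12} \\ B_{12} & B_{22} \end{pmatrix}
=
\begin{pmatrix} A_{11} & A_{12} \\ A_{12} & A_{22} \end{pmatrix}^{-1}
=
\frac{1}{A_{11}A_{22} - A_{12}^{2}}
\begin{pmatrix} A_{22} & -A_{12} \\ -A_{12} & A_{11} \end{pmatrix}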
Example #4
def derivatives(Image1, Image2, u, v, h, b):
    N, M = Image1.shape
    #x = np.array(range(N))
    #y = np.array(range(M))
    y = np.linspace(0, N - 1, N)
    x = np.linspace(0, M - 1, M)
    x, y = np.meshgrid(x, y)
    Ix = np.zeros((N, M))
    Iy = np.zeros((N, M))
    x = x + u
    y = y + v
    WImage, I2x, I2y = warp_image2(Image2, x, y,
                                   h)  # derivatives of the second (warped) image
    '''Gt = np.ones((2, 2)) * 0.25
    It = filter2(Image1, -Gt) + filter2(WImage, Gt)'''
    It = WImage - Image1  # temporal derivative

    Ix = filter2(Image1, h)  # spatial derivatives for the first image
    Iy = filter2(Image1, h.T)
    Ix = b * I2x + (1 - b) * Ix  # Averaging
    Iy = b * I2y + (1 - b) * Iy

    It = np.nan_to_num(It)  # remove NaN values from the derivatives
    Ix = np.nan_to_num(Ix)
    Iy = np.nan_to_num(Iy)
    out_bound = np.where((y > N - 1) | (y < 0) | (x > M - 1) | (x < 0))
    Ix[out_bound] = 0  # zero the derivatives at out-of-bounds pixels
    Iy[out_bound] = 0
    It[out_bound] = 0
    return [Ix, Iy, It]
Example #5
def HS_Algorithm(im1, im2, *, alpha=0.001, Niter=8):

    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    # set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    for _ in range(Niter):

        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)

        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
Example #6
def computeHS(beforeImg, afterImg, alpha, delta, itmax, u, v):
    # remove noise
    beforeImg = cv.GaussianBlur(beforeImg, (5, 5), 0)
    afterImg = cv.GaussianBlur(afterImg, (5, 5), 0)

    # set up initial values

    fx, fy, ft = image_derivatives(beforeImg, afterImg)
    avg_kernel = np.array([[1 / 12, 1 / 6, 1 / 12],
                           [1 / 6, 0, 1 / 6],
                           [1 / 12, 1 / 6, 1 / 12]], float)
    iter_counter = 0
    while True:
        iter_counter += 1
        u_avg = filter2(u, avg_kernel)
        v_avg = filter2(v, avg_kernel)
        p = fx * u_avg + fy * v_avg + ft
        d = 4 * alpha**2 + fx**2 + fy**2
        prev = u

        u = u_avg - fx * (p / d)
        v = v_avg - fy * (p / d)

        diff = np.linalg.norm(u - prev, 2)
        if diff < delta or iter_counter > itmax:
            print("iteration number:", iter_counter)
            print("error =", diff)
            break
    #draw_quiver(u, v, beforeImg)
    return [u, v]
Example #7
def HS_helper(alpha, Niter, kernel, U, V, fx, fy, ft):
    for _ in np.arange(Niter):
        #%% Compute local averages of the flow vectors
        uAvg = filter2(U, kernel, mode='mirror')  # uBar in the paper
        vAvg = filter2(V, kernel, mode='mirror')  # vBar in the paper

        U, V = HS_helper2(alpha, fx, fy, ft, uAvg, vAvg)
    return U, V
Example #8
def HornSchunck(im1: np.ndarray,
                im2: np.ndarray,
                *,
                alpha: float = 0.001,
                Niter: int = 8,
                verbose: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """

    Parameters
    ----------

    im1: numpy.ndarray
        image at t=0
    im2: numpy.ndarray
        image at t=1
    alpha: float
        regularization constant
    Niter: int
        number of iterations
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    # set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    if verbose:
        from .plots import plotderiv
        plotderiv(fx, fy, ft)


    # Iteration to reduce error
    for _ in range(Niter):
        # %% Compute local averages of the flow vectors
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)
        # %% common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        # %% iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
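A minimal usage sketch for the function above, on two synthetic frames (this assumes HornSchunck and its helpers are importable from a pyoptflow-style module; the data and parameter values here are illustrative only):

import numpy as np

frame0 = np.zeros((64, 64), dtype=np.float32)
frame0[20:40, 20:40] = 1.0                  # a bright square
frame1 = np.roll(frame0, shift=2, axis=1)   # the same square shifted 2 px along axis 1

U, V = HornSchunck(frame0, frame1, alpha=1.0, Niter=100)
print(U.shape, float(np.abs(U).max()), float(np.abs(V).max()))  # inspect the recovered flow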
Example #9
def warp_image2(Image, XI, YI, h):

    # Add the estimated flow to the second image's coordinates, remap toward the original image, and compute the derivatives of the warped image.
    Image = np.array(Image, np.float32)
    XI = np.array(XI, np.float32)
    YI = np.array(YI, np.float32)
    WImage = cv2.remap(Image, XI, YI, interpolation=cv2.INTER_CUBIC)
    Ix = filter2(WImage, h)
    Iy = filter2(WImage, h.T)

    Iy = cv2.remap(Iy, XI, YI, interpolation=cv2.INTER_CUBIC)
    Ix = cv2.remap(Ix, XI, YI, interpolation=cv2.INTER_CUBIC)
    return [WImage, Ix, Iy]
Example #10
def op(im1,
       im2,
       uInitial,
       vInitial,
       sigma=1.0,
       alpha=0.001,
       Niter=8,
       verbose=False):
    """
    im1: image at t=0
    im2: image at t=1
    alpha: regularization constant
    Niter: number of iterations
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    im1 = lowpassfilt(im1, sigma)
    im2 = lowpassfilt(im2, sigma)

    # set up initial velocities
    if uInitial.shape[0] < 2:
        uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    if vInitial.shape[0] < 2:
        vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    # Iteration to reduce error
    for it in range(Niter):
        # %% Compute local averages of the flow vectors
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)
        # %% common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        # %% iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
def computeHS(name1, name2, alpha, delta):
    path = os.path.join(os.path.dirname(__file__), 'test images')
    beforeImg = cv2.imread(os.path.join(path, name1), cv2.IMREAD_GRAYSCALE)
    afterImg = cv2.imread(os.path.join(path, name2), cv2.IMREAD_GRAYSCALE)

    if beforeImg is None:
        raise NameError("Can't find image: \"" + name1 + '\"')
    elif afterImg is None:
        raise NameError("Can't find image: \"" + name2 + '\"')

    beforeImg = beforeImg.astype(float)
    afterImg = afterImg.astype(float)

    # remove noise
    beforeImg = cv2.GaussianBlur(beforeImg, (5, 5), 0)
    afterImg = cv2.GaussianBlur(afterImg, (5, 5), 0)

    # set up initial values
    u = np.zeros((beforeImg.shape[0], beforeImg.shape[1]))
    v = np.zeros((beforeImg.shape[0], beforeImg.shape[1]))
    fx, fy, ft = get_derivatives(beforeImg, afterImg)
    avg_kernel = np.array(
        [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]],
        float)
    iter_counter = 0
    while True:
        iter_counter += 1
        u_avg = filter2(u, avg_kernel)
        v_avg = filter2(v, avg_kernel)
        p = fx * u_avg + fy * v_avg + ft
        d = 4 * alpha**2 + fx**2 + fy**2
        prev = u

        u = u_avg - fx * (p / d)
        v = v_avg - fy * (p / d)

        diff = np.linalg.norm(u - prev, 2)
        # convergence check (at most 300 iterations)
        if diff < delta or iter_counter > 300:
            # print("iteration number: ", iter_counter)
            break

    draw_quiver(u, v, beforeImg)

    return [u, v]
Example #12
def motion_model_baseline(I1, I2, u, v):

    ## Parameters of optical flow
    dx = 1
    dy = 1
    du2 = dx * dx
    dv2 = dy * dy
    Lambda = 0.001
    dtau = 0.01

    ## Kernels for performing the gradient operations
    Horn_Schunck_Kernel = np.array(
        [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]],
        float)

    ## Convert the images to numpy float32 for processing
    I1 = I1.astype(np.float32)
    I2 = I2.astype(np.float32)

    ## Initial guess on the vector fields
    u0 = np.zeros((u.shape[0], u.shape[1]))
    v0 = np.zeros((v.shape[0], v.shape[1]))
    u = u0
    v = v0

    ## Gradients of the image
    Ix = (I1[2:, 1:-1] - I1[1:-1, 1:-1] + I1[2:, 2:] - I1[1:-1, 2:] +
          I2[2:, 1:-1] - I2[1:-1, 1:-1] + I2[2:, 2:] - I2[1:-1, 2:]) / 4.0
    Iy = (I1[1:-1, 2:] - I1[1:-1, 1:-1] + I1[2:, 2:] - I1[2:, 1:-1] +
          I2[1:-1, 2:] - I2[1:-1, 1:-1] + I2[2:, 2:] - I2[2:, 1:-1]) / 4.0
    It = (I2[1:-1, 1:-1] - I1[1:-1, 1:-1] + I2[1:-1, 2:] - I1[1:-1, 2:] +
          I2[2:, 1:-1] - I1[2:, 1:-1] + I2[2:, 2:] - I1[2:, 2:]) / 4.0

    for i in range(20):

        ## Laplacian of the vector fields
        u_L = filter2(u, Horn_Schunck_Kernel)
        v_L = filter2(v, Horn_Schunck_Kernel)

        optical_flow_constraint = (Ix * u_L[1:-1, 1:-1] + Iy * v_L[1:-1, 1:-1]
                                   + It) / (Lambda**2 + Ix**2 + Iy**2)

        ## Update the vector field
        u[1:-1, 1:-1] = u_L[1:-1, 1:-1] - Ix * optical_flow_constraint
        v[1:-1, 1:-1] = v_L[1:-1, 1:-1] - Iy * optical_flow_constraint

    return u, v
def HS(im1, im2, alpha, Niter):
    """
    im1: image at t=0
    im2: image at t=1
    alpha: regularization constant
    Niter: number of iterations
    """

    #set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    # fg,ax = plt.subplots(1,3,figsize=(18,5))
    # for f,a,t in zip((fx,fy,ft),ax,('$f_x$','$f_y$','$f_t$')):
    #     h=a.imshow(f,cmap='bwr')
    #     a.set_title(t)
    #     fg.colorbar(h,ax=a)

    # Averaging kernel
    kernel = np.array(
        [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]],
        float)


    # Iteration to reduce error
    for _ in range(Niter):
        #%% Compute local averages of the flow vectors
        uAvg = filter2(U, kernel)
        vAvg = filter2(V, kernel)

        #%% common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)

        #%% iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
Example #14
def HornSchunck(im1,
                im2,
                alpha: float = 0.001,
                Niter: int = 8,
                verbose: bool = False,
                type: int = 1):

    # im1: image at t=0
    # im2: image at t=1
    # alpha: regularization constant
    # Niter: number of iteration

    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    U = np.zeros([im1.shape[0], im1.shape[1]])
    V = np.zeros([im1.shape[0], im1.shape[1]])

    mensage = " Initialized "
    if type == 1 or type == 2:
        [fx, fy, ft] = computeDerivatives(im1, im2)
        mensage += 'with Derivative 1 '
    if type == 3 or type == 4:
        [fx, fy, ft] = computeDerivatives2(im1, im2)
        mensage += 'with Derivative 2 '
    if type == 2 or type == 4:
        U, V = calculate_UV(U, V, fx, fy, ft)
        mensage += 'and u,v equal to q (nearest point of the line) '
    else:
        mensage += 'and u,v equal to 0'

    if verbose:
        plotderiv(fx, fy, ft, mensage)
        print("[*]Type:" + mensage + '\n  ')
        print('   Mean x ' + str(np.sum(cv2.mean(fx))))
        print('   Mean y ' + str(np.sum(cv2.mean(fy))))
        print('   Mean t ' + str(np.sum(cv2.mean(ft))))

    for _ in range(Niter):
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V, mensage
Example #15
def warping_step(beforeImg, afterImg, alpha, delta, u, v, kernel_size,
                 downsampling_gauss, h, b, factor):
    #removing noise
    deviation_gausse = 1 / math.sqrt(2 * downsampling_gauss)
    beforeImg = cv2.GaussianBlur(beforeImg,
                                 ksize=(kernel_size, kernel_size),
                                 sigmaX=deviation_gausse,
                                 sigmaY=deviation_gausse)
    afterImg = cv2.GaussianBlur(afterImg,
                                ksize=(kernel_size, kernel_size),
                                sigmaX=deviation_gausse,
                                sigmaY=deviation_gausse)
    #u0=u; v0=v;
    # set up initial values

    fx, fy, ft = derivatives(beforeImg, afterImg, u, v, h, b)
    u0 = u
    v0 = v
    avg_kernel = np.array(
        [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]],
        float)
    for _ in range(10):
        u_avg = filter2(u, avg_kernel)
        v_avg = filter2(v, avg_kernel)
        p = fx * u_avg + fy * v_avg + ft
        d = alpha + fx**2 + fy**2
        u = u_avg - fx * (p / d)
        v = v_avg - fy * (p / d)
        print(
            'AVE',
            np.linalg.norm(u - u0) / np.linalg.norm(u) +
            np.linalg.norm(v - v0) / np.linalg.norm(v))
        u0 = u
        v0 = v
        #u=median_filter(u,size=5)
        #v=median_filter(v,size=5)
    u = medfilt(u, kernel_size=5)
    v = medfilt(v, kernel_size=5)

    #u=Expand(u,factor)
    #v=Expand(v,factor)

    return [u, v]
Example #16
def HornSchunck(im1, im2, alpha, Niter):
    """

    Parameters
    ----------

    im1: numpy.ndarray
        image at t=0
    im2: numpy.ndarray
        image at t=1
    alpha: float
        regularization constant
    Niter: int
        number of iterations
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    # set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    # Iteration to reduce error
    for _ in range(Niter):
        # Compute local averages of the flow vectors
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)
        # common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        # iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
Example #17
def computeDerivatives2(im1, im2):
    fx = filter2(im1, kernelX2) + filter2(im2, kernelX2)
    fy = filter2(im1, kernelY2) + filter2(im2, kernelY2)

    ft = filter2(im1, kernelT2) + filter2(im2, -kernelT2)
    return fx, fy, ft
def computeDerivatives(im1, im2):

    fx = filter2(im1, kernelX) + filter2(im2, kernelX)
    fy = filter2(im1, kernelY) + filter2(im2, kernelY)
    ft = filter2(im1, kernelT) + filter2(im2, -kernelT)

    return fx, fy, ft
def computeDerivatives(
        im1: np.ndarray,
        im2: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    fx = filter2(im1, kernelX) + filter2(im2, kernelX)
    fy = filter2(im1, kernelY) + filter2(im2, kernelY)
    # ft = im2 - im1
    ft = filter2(im1, kernelT) + filter2(im2, -kernelT)
    return fx, fy, ft
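The kernelX, kernelY, kernelT used above (and their *2 variants for computeDerivatives2) are module-level constants that are not included in these snippets; presumably they are the 2×2 Horn–Schunck difference kernels that Example #25 below defines inline, i.e. something like:

# Assumed module-level kernels (matching the inline definitions in Example #25);
# the kernelX2/kernelY2/kernelT2 variants are not shown in the source.
import numpy as np

kernelX = np.array([[-1, 1], [-1, 1]]) * 0.25   # d/dx
kernelY = np.array([[-1, -1], [1, 1]]) * 0.25   # d/dy
kernelT = np.ones((2, 2)) * 0.25                # d/dt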
Example #20
def image_derivatives(img1, img2):
    # Computing Image derivatives 
    Gx = np.array([[-1, 1], [-1, 1]]) * 0.25
    Gy = np.array([[-1, -1], [1, 1]]) * 0.25
    Gt = np.ones((2, 2)) * 0.25
    fx = filter2(img1, Gx) + filter2(img2, Gx)
    fy = filter2(img1, Gy) + filter2(img2, Gy)
    ft = filter2(img1, -Gt) + filter2(img2, Gt)
    return [fx, fy, ft]
Example #21
    def compute_horn_schunck(self, im1, im2, alpha=1.0, Niter=8):
        g_im1 = cv.cvtColor(im1, cv.COLOR_BGR2GRAY)
        g_im2 = cv.cvtColor(im2, cv.COLOR_BGR2GRAY)

        hsv = np.zeros_like(im1)
        # Sets image saturation to maximum
        hsv[..., 1] = 255

        # set up initial velocities
        uInitial = np.zeros([g_im1.shape[0], g_im1.shape[1]])
        vInitial = np.zeros([g_im1.shape[0], g_im1.shape[1]])

        # Set initial value for the flow vectors
        U = uInitial
        V = vInitial

        # Estimate derivatives
        [fx, fy, ft] = self.compute_derivatives(g_im1, g_im2)

        kernel = np.array([[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6],
                           [1 / 12, 1 / 6, 1 / 12]])

        # Iteration to reduce error
        for _ in range(Niter):
            # Compute local averages of the flow vectors
            uAvg = filter2(U, kernel)
            vAvg = filter2(V, kernel)

            der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
            U = uAvg - fx * der
            V = vAvg - fy * der

        mag, ang = cv.cartToPolar(U, V)
        hsv[..., 0] = ang * 180 / np.pi / 2
        hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX)
        rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)

        return rgb
Example #22
def HS(im1, im2, alpha=1, Niter=10):
    """
    im1: image at t=0
    im2: image at t=1
    alpha: regularization constant
    Niter: number of iterations
    """

    #set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    # Averaging kernel
    kernel = np.array(
        [[1 / 12, 1 / 6, 1 / 12], [1 / 6, 0, 1 / 6], [1 / 12, 1 / 6, 1 / 12]],
        float)

    # Iteration to reduce error
    for _ in range(Niter):
        #Compute local averages of the flow vectors
        uAvg = filter2(U, kernel)
        vAvg = filter2(V, kernel)
        #common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        #iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V
Example #23
def computeDerivatives(
        im1: np.ndarray,
        im2: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:

    fx = filter2(im1, Xkernel) + filter2(im2, Xkernel)
    fy = filter2(im1, Ykernel) + filter2(im2, Ykernel)

    # ft = im2 - im1
    ft = filter2(im1, Tkernel) + filter2(im2, -Tkernel)

    return fx, fy, ft
def get_derivatives(img1, img2):
    #derivative masks
    x_kernel = np.array([[-1, 1], [-1, 1]]) * 0.25
    y_kernel = np.array([[-1, -1], [1, 1]]) * 0.25
    t_kernel = np.ones((2, 2)) * 0.25

    fx = filter2(img1, x_kernel) + filter2(img2, x_kernel)
    fy = filter2(img1, y_kernel) + filter2(img2, y_kernel)
    ft = filter2(img1, -t_kernel) + filter2(img2, t_kernel)

    return [fx, fy, ft]
Example #25
def computeDerivatives(im1, im2):
    #%% build kernels for calculating derivatives
    kernelX = np.array([[-1, 1], [-1, 1]]) * .25  #kernel for computing d/dx
    kernelY = np.array([[-1, -1], [1, 1]]) * .25  #kernel for computing d/dy
    kernelT = np.ones((2, 2)) * .25

    fx = filter2(im1, kernelX) + filter2(im2, kernelX)
    fy = filter2(im1, kernelY) + filter2(im2, kernelY)

    #ft = im2 - im1
    ft = filter2(im1, kernelT) + filter2(im2, -kernelT)

    return fx, fy, ft
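These 2×2 kernels average forward differences over the 2×2×2 cube spanned by the two frames, which (up to the convolution offset convention) reproduces the derivative estimates of the original Horn–Schunck paper, with row index i, column (x) index j, and frame index k:

E_x \approx \tfrac{1}{4}\bigl[(E_{i,j+1,k}-E_{i,j,k}) + (E_{i+1,j+1,k}-E_{i+1,j,k}) + (E_{i,j+1,k+1}-E_{i,j,k+1}) + (E_{i+1,j+1,k+1}-E_{i+1,j,k+1})\bigr]

with analogous averages of the differences along i for E_y and along k for E_t.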
def conv2(x, y, mode='same'):
    """
    Emulate the function conv2 from Mathworks.

    Usage:

    z = conv2(x,y,mode='same')

    TODO: 
     - Support other modes than 'same' (see conv2.m)
    """

    if mode != 'same':
        raise Exception("Mode not supported")

    # Add singleton dimensions
    if (len(x.shape) < len(y.shape)):
        dim = x.shape
        for i in range(len(x.shape), len(y.shape)):
            dim = (1, ) + dim
        x = x.reshape(dim)
    elif (len(y.shape) < len(x.shape)):
        dim = y.shape
        for i in range(len(y.shape), len(x.shape)):
            dim = (1, ) + dim
        y = y.reshape(dim)

    origin = ()

    # Apparently, the origin must be set in a special way to reproduce
    # the results of scipy.signal.convolve and Matlab
    for i in range(len(x.shape)):
        if ((x.shape[i] - y.shape[i]) % 2 == 0 and x.shape[i] > 1
                and y.shape[i] > 1):
            origin = origin + (-1, )
        else:
            origin = origin + (0, )

    z = filter2(x, y, mode='constant', origin=origin)

    return z
Example #27
def computeDerivatives(im1, im2):
    eMode = 'mirror'  # convolution extension mode
    #%% build kernels for calculating derivatives
    kernelX = np.array([[-1, 1],
                        [-1, 1]], dtype='float32') * .25 #kernel for computing d/dx
    kernelY = np.array([[-1,-1],
                        [ 1, 1]], dtype='float32') * .25 #kernel for computing d/dy

    
    kernelT = np.ones((2,2), dtype='float32')*.25

    #Ex in the paper (in the centre of 2x2 cube)
    fx = filter2(im1, kernelX, mode=eMode) + filter2(im2, kernelX, mode=eMode)
    #Ey in the paper (in the centre of 2x2 cube)
    fy = filter2(im1, kernelY, mode=eMode) + filter2(im2, kernelY, mode=eMode)

    #ft = im2 - im1
    #Et in the paper (in the centre of 2x2 cube)
    ft = filter2(im2, kernelT, mode=eMode) + filter2(im1,-kernelT, mode=eMode)

    return fx,fy,ft
def physicsBasedOpticalFlowLiuShen(im1, im2, h, U, V):
    # new model
    #Dm=0*10**(-3);
    #f=Dm*laplacian(im1,1);
    f = 0

    maxnum = 60
    tol = 1e-8
    dx = 1
    dt = 1
    # unit time

    #
    # I: intensity function
    # Ix: partial derivative for x-axis
    # Iy: partial derivative for y-axis
    # It: partial derivative for time t
    # f: related all boundary assumption
    # lambda: regularization parameter
    # nb: the neighborhood information
    #
    #-------------------------------------------------------------------
    D = np.array([[0, -1, 0], [0, 0, 0], [0, 1, 0]]) / 2
    # partial derivative
    M = np.array([[1, 0, -1], [0, 0, 0], [-1, 0, 1]]) / 4
    # mixed partial derivatives
    F = np.array([[0, 1, 0], [0, 0, 0], [0, 1, 0]])
    # average
    D2 = np.array([[0, 1, 0], [0, -2, 0], [0, 1, 0]])
    # partial derivative
    H = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    #------------------------------------------------------------------
    #------------------------------------------------------------------
    IIx = im1 * filter2(im1, D / dx, mode='nearest')
    IIy = im1 * filter2(im1, D.transpose() / dx, mode='nearest')
    II = im1 * im1
    Ixt = im1 * filter2((im2 - im1) / dt, D / dx, mode='nearest')
    Iyt = im1 * filter2((im2 - im1) / dt, D.transpose() / dx, mode='nearest')

    k = 0
    total_error = 100000000
    u = np.float64(U)
    v = np.float64(V)

    r, c = im2.shape

    #------------------------------------------------------------------
    B11, B12, B22 = generate_invmatrix(im1, h, dx)

    error = 0
    while total_error > tol and k < maxnum:
        bu = 2*IIx*filter2(u, D/dx, mode='nearest') + IIx*filter2(v, D.transpose()/dx, mode='nearest') + \
               IIy*filter2(v, D/dx, mode='nearest') + II*filter2(u, F/(dx*dx), mode='nearest') + \
               II*filter2(v, M/(dx*dx), mode='nearest') + h*filter2(u, H/(dx*dx))+Ixt

        bv = IIy*filter2(u, D/dx, mode='nearest') + IIx*filter2(u, D.transpose()/dx, mode='nearest') + \
            2*IIy*filter2(v, D.transpose()/dx, mode='nearest') + II*filter2(u, M/(dx*dx), mode='nearest') + \
            II*filter2(v, F.transpose()/(dx*dx), mode='nearest') + h*filter2(v, H/(dx*dx))+Iyt

        unew, vnew, total_error = helper(B11, B12, B22, bu, bv, u, v, r, c)
        # Use numpy.linalg.norm rather than scipy.linalg.norm: the scipy version
        # causes contention and kernel yields, making kernel time roughly 60%
        # longer and driving up the system load average.
        # total_error = (norm(unew-u,'fro')+norm(vnew-v,'fro'))/(r*c)

        u = unew
        v = vnew
        error = total_error
        k = k + 1

    return np.float32(u), np.float32(v), error
Example #29
def decompo_texture(im, theta, nIters, alp, isScale):
    IM = scale_image(im, -1, 1)
    im = scale_image(im, -1, 1)
    p = np.zeros((im.shape[0], im.shape[1], 2), dtype=np.float32)
    delta = 1.0 / (4.0 * theta)
    I = np.squeeze(IM)
    for _ in range(nIters):

        #Compute divergence        eqn(8)
        #div_p =filter2(p[:im.shape[0],:im.shape[1],0], np.array([[-1, 1, 0]]))+ filter2(p[:im.shape[0],:im.shape[1],1], np.array( [[-1]  , [1], [0]]))
        div_p = correlate(p[:im.shape[0], :im.shape[1], 0],
                          np.array([[-1, 1, 0]]),
                          mode='wrap') + correlate(
                              p[:im.shape[0], :im.shape[1], 1],
                              np.array([[-1], [1], [0]]),
                              mode='wrap')

        I_x = filter2(I + theta * div_p, np.array([[1, -1]]))

        I_y = filter2(I + theta * div_p, np.array([[1], [-1]]))

        # Update dual variable      eqn(9)
        p[:im.shape[0], :im.shape[1],
          0] = p[:im.shape[0], :im.shape[1], 0] + delta * I_x
        p[:im.shape[0], :im.shape[1],
          1] = p[:im.shape[0], :im.shape[1], 1] + delta * I_y

        # Reproject to |p| <= 1     eqn(10)

        reprojection = np.maximum(
            1.0,
            np.sqrt(
                np.multiply(p[:im.shape[0], :im.shape[1],
                              0], p[:im.shape[0], :im.shape[1], 0]) +
                np.multiply(p[:im.shape[0], :im.shape[1],
                              1], p[:im.shape[0], :im.shape[1], 1])))
        p[:im.shape[0], :im.shape[1],
          0] = p[:im.shape[0], :im.shape[1], 0] / reprojection
        p[:im.shape[0], :im.shape[1],
          1] = p[:im.shape[0], :im.shape[1], 1] / reprojection

    # compute divergence
    div_p = correlate(
        p[:im.shape[0], :im.shape[1], 0], np.array([[-1, 1, 0]]),
        mode='wrap') + correlate(p[:im.shape[0], :im.shape[1], 1],
                                 np.array([[-1], [1], [0]]),
                                 mode='wrap')

    #compute structure component
    IM[:im.shape[0], :im.shape[1]] = I + theta * div_p

    if isScale:
        texture = np.squeeze(scale_image((im - alp * IM), 0, 255))
        structure = np.squeeze(scale_image(IM, 0, 255))
    else:
        texture = np.squeeze(im - alp * IM)
        structure = np.squeeze(IM)

    return [texture, structure]
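For reference, the dual update inside the loop above is the projected-gradient step of the ROF-type structure–texture decomposition that the eqn (8)–(10) comments refer to, with step size \delta = 1/(4\theta):

p^{n+1} = \frac{p^{n} + \delta\,\nabla\bigl(I + \theta\,\operatorname{div}\,p^{n}\bigr)}
               {\max\bigl(1,\;\bigl|p^{n} + \delta\,\nabla\bigl(I + \theta\,\operatorname{div}\,p^{n}\bigr)\bigr|\bigr)}

The structure component is then I + \theta\,\operatorname{div}\,p, and the texture component is the (optionally rescaled) residual im - \alpha \cdot IM.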
def HornSchunck(im1: np.ndarray,
                im2: np.ndarray,
                *,
                alpha: float = 0.001,
                Niter: int = 8,
                verbose: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """
    Parameters
    ----------
    im1: numpy.ndarray
        image at t=0
    im2: numpy.ndarray
        image at t=1
    alpha: float
        regularization constant
    Niter: int
        number of iterations
    """
    im1 = im1.astype(np.float32)
    im2 = im2.astype(np.float32)

    # set up initial velocities
    uInitial = np.zeros([im1.shape[0], im1.shape[1]])
    vInitial = np.zeros([im1.shape[0], im1.shape[1]])

    # Set initial value for the flow vectors
    U = uInitial
    V = vInitial

    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)

    if verbose:
        from .plots import plotderiv
        plotderiv(fx, fy, ft)


    # Iteration to reduce error
    for _ in range(Niter):
        # %% Compute local averages of the flow vectors
        uAvg = filter2(U, HSKERN)
        vAvg = filter2(V, HSKERN)

        # %% common part of update step
        der = (fx * uAvg + fy * vAvg + ft) / (alpha**2 + fx**2 + fy**2)
        # %% iterative step
        U = uAvg - fx * der
        V = vAvg - fy * der

    return U, V