Example #1
def expand_im(im, filter_vec, expand_shape):
    """
    reduce image by taking the even pixel in the even row

    Parameters
    ----------
    :param im: array_like

    :param filter_vec: array_like

    :param expand_shape: tuple


    Returns
    -------
    :return the expand image

    """
    # step 1: expand
    M, N = expand_shape
    expanded_im = np.zeros((M, N))
    expanded_im[::2, 1::2] = im

    # step 2: blur
    expanded_im = conv(expanded_im, filter_vec)  # convolution with horizontal filter
    expanded_im = conv(expanded_im, filter_vec.transpose())  # convolution with vertical filter

    return expanded_im
Example #2
def compute_derivatives(frame, next_frame, kernelX, kernelY, kernelT,
                        are_phases=False, kernel_center=None):
    if are_phases:
        fx = phase_conv2D(frame, kernelX, kernel_center) \
           + phase_conv2D(next_frame, kernelX, kernel_center)
        fy = phase_conv2D(frame, kernelY, kernel_center) \
           + phase_conv2D(next_frame, kernelY, kernel_center)
        ft = norm_angle(frame - next_frame)
    else:
        fx = conv(frame, kernelX) + conv(next_frame, kernelX)
        fy = conv(frame, kernelY) + conv(next_frame, kernelY)
        # ft = conv(frame, kernelT) + conv(next_frame, -kernelT)
        ft = frame - next_frame
    return fx, fy, ft
Example #3
def horn_schunck_step(frame, next_frame, alpha, max_Niter, convergence_limit,
                      kernelHS, kernelT, kernelX, kernelY):
    """
    Parameters
    ----------
    frame: numpy.ndarray
        image at t=0
    next_frame: numpy.ndarray
        image at t=1
    alpha: float
        regularization constant
    max_Niter: int
        maximum number of iteration
    convergence_limit: float
        the maximum absolute change between iterations defining convergence
    """
    # set up initial velocities
    vx = np.zeros_like(frame)
    vy = np.zeros_like(frame)

    # Estimate derivatives
    [fx, fy, ft] = compute_derivatives(frame,
                                       next_frame,
                                       kernelX=kernelX,
                                       kernelY=kernelY,
                                       kernelT=kernelT)
    # Iteration to reduce error
    for i in range(max_Niter):
        # Compute local averages of the flow vectors (smoothness constraint)
        vx_avg = conv(vx, kernelHS)
        vy_avg = conv(vy, kernelHS)
        # common part of update step (brightness constancy)
        der = (fx * vx_avg + fy * vy_avg + ft) / (alpha**2 + fx**2 + fy**2)
        # iterative step
        new_vx = vx_avg - fx * der
        new_vy = vy_avg - fy * der
        # check convergence
        max_dv = np.max(
            [np.max(np.abs(vx - new_vx)),
             np.max(np.abs(vy - new_vy))])
        vx, vy = new_vx, new_vy
        if max_dv < convergence_limit:
            break
    return vx + vy * 1j
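
To run the step above end to end, concrete kernels are needed. Below is a minimal driver sketch, assuming compute_derivatives from Example #8 and conv = scipy.ndimage.convolve are in scope; the kernel values, image size, and parameter settings are illustrative assumptions, not taken from the examples themselves.

import numpy as np
from scipy.ndimage import convolve as conv

# Classic Horn-Schunck kernels (assumed values, for illustration only)
kernelHS = np.array([[1/12., 1/6., 1/12.],
                     [1/6.,  0.,   1/6.],
                     [1/12., 1/6., 1/12.]])          # local average of the flow
kernelX = np.array([[-1., 1.], [-1., 1.]]) * 0.25    # spatial derivative in x
kernelY = np.array([[-1., -1.], [1., 1.]]) * 0.25    # spatial derivative in y
kernelT = np.ones((2, 2)) * 0.25                     # temporal derivative

frame = np.random.rand(64, 64)
next_frame = np.roll(frame, 1, axis=1)   # synthetic one-pixel horizontal shift

flow = horn_schunck_step(frame, next_frame, alpha=1.0, max_Niter=100,
                         convergence_limit=1e-3, kernelHS=kernelHS,
                         kernelT=kernelT, kernelX=kernelX, kernelY=kernelY)
vx, vy = flow.real, flow.imag            # the step returns vx + vy * 1j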
Example #4
def reduce_im(im, filter_vec):
    """
    reduce image by taking the even pixel in the even row

    Parameters
    ----------
    :param im: array_like

    :param filter_vec: array_like

    Returns
    -------
    :return the reduced image

    """
    # step 1: blur
    im = conv(im, filter_vec)  # convolution with horizontal filter
    im = conv(im, filter_vec.transpose())  # convolution with vertical filter

    # step 2: reduce
    return im[::2, 1::2]
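
A short usage sketch for the reduce/expand pair above, assuming conv is scipy.ndimage.convolve; the binomial filter values and the input image are illustrative assumptions.

import numpy as np
from scipy.ndimage import convolve as conv

# assumed separable blur filter (row vector), normalized to sum to 1
filter_vec = np.array([[1., 4., 6., 4., 1.]]) / 16.

im = np.random.rand(64, 64)
small = reduce_im(im, filter_vec)                  # one pyramid level down
big = expand_im(small, 2 * filter_vec, im.shape)   # back up; doubling the filter
                                                   # compensates for the inserted zeros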
Example #5
    def _get_status2(self):

        a = np.array(self.matrix).reshape(4, 4)

        # a merge is still possible if any two vertically (v) or horizontally
        # (h) adjacent tiles hold the same value
        n = 1
        k = np.ones(n, dtype=int)
        v = (conv((a[1:] == a[:-1]).astype(int), k, axis=0, mode='constant') >=
             n).any()
        h = (conv(
            (a[:, 1:] == a[:, :-1]).astype(int), k, axis=1, mode='constant') >=
             n).any()

        if a.max() >= 256:
            return "win"

        if v | h:
            return "not_over"

        # board is full and no merges are possible
        n_filled = np.count_nonzero(a)
        if n_filled >= 16:
            return "lose"

        # empty cells remain, so moves are still possible
        return "not_over"
Example #6
def normalized(img, window_shape=None):
    # normalize to zero mean and unit variance; with window_shape the statistics
    # are local (box-filter mean), otherwise they are global per channel
    if window_shape is None:
        mfunc = lambda img: img.mean(axis=(0, 1))
    else:
        box = np.ones(window_shape) / (window_shape[0] * window_shape[1])
        if img.ndim == 3:
            box = box[..., None]
        mfunc = lambda img: conv(img, box)

    diff = img - mfunc(img)
    std = np.sqrt(mfunc(diff ** 2))
    normalized_img = diff / (std + 1e-6)
    return normalized_img
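
A brief usage sketch, assuming conv is scipy.ndimage.convolve and a random RGB image as input (both are assumptions for illustration).

import numpy as np
from scipy.ndimage import convolve as conv

img = np.random.rand(128, 128, 3)
global_norm = normalized(img)                       # zero mean, unit std per channel
local_norm = normalized(img, window_shape=(7, 7))   # statistics from a 7x7 neighbourhood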
Example #7
    def energy_map_w_filter(self):
        """
        Counts energy map of image, using convolution and filter matrices.
        :return: energy map of image
        """
        if self._image is None:
            raise ValueError
        else:
            dx = np.array([
                [1., 2., 1.],
                [0., 0., 0.],
                [-1., -2., -1.]
            ])
            dy = dx.transpose()

            RGB = np.split(self._image, 3, axis=2)
            energy_map = np.zeros(self._image.shape[:2])

            for i in RGB:
                i = i.reshape(self._image.shape[:2])
                energy_map += np.absolute(conv(i, dx)) + np.absolute(conv(i, dy))

            self._energy_map = energy_map
            return energy_map
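
For reference, the same per-channel energy computation can be run standalone; a minimal sketch assuming conv is scipy.ndimage.convolve and a skimage sample picture.

import numpy as np
from scipy.ndimage import convolve as conv
from skimage import data, img_as_float

dx = np.array([[1., 2., 1.],
               [0., 0., 0.],
               [-1., -2., -1.]])       # Sobel kernel (vertical gradient)
dy = dx.transpose()                    # Sobel kernel (horizontal gradient)

rgb = img_as_float(data.astronaut())   # sample (H, W, 3) image
energy = sum(np.absolute(conv(rgb[..., c], dx)) + np.absolute(conv(rgb[..., c], dy))
             for c in range(3))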
Example #8
def compute_derivatives(frame, next_frame, kernelX, kernelY, kernelT):
    fx = conv(frame, kernelX) + conv(next_frame, kernelX)
    fy = conv(frame, kernelY) + conv(next_frame, kernelY)
    ft = conv(frame, kernelT) + conv(next_frame, -kernelT)
    return fx, fy, ft
Example #9
from skimage import data, img_as_float, color
from skimage.feature import corner_harris, corner_peaks
from scipy import ndimage as ndi
from scipy.ndimage import convolve as conv
from scipy.ndimage import gaussian_filter as gauss
import numpy as np
import matplotlib.pyplot as plt


I = data.camera()
I = img_as_float(I)

Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
Gy = np.transpose(Gx)
Ix = conv(I, Gx, mode='constant')
Iy = conv(I, Gy, mode='constant')

plt.figure(1)
plt.subplot(1,2,1)
plt.imshow(Ix)
plt.subplot(1,2,2)
plt.imshow(Iy)

# Structure tensor
Axx = gauss(Ix*Ix, 1, mode='constant')
Ayy = gauss(Iy*Iy, 1, mode='constant')
Axy = gauss(Ix*Iy, 1, mode='constant')

# determinant
detA = Axx * Ayy - Axy ** 2
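
The snippet stops at the determinant; a typical continuation computes the trace and the Harris response and picks its local maxima. The constant k below is an assumed, commonly used value, and corner_peaks is already imported above.

# Trace and Harris corner response
traceA = Axx + Ayy
k = 0.05                                   # assumed sensitivity constant (typically 0.04-0.06)
R = detA - k * traceA ** 2

coords = corner_peaks(R, min_distance=5)   # local maxima of the response

plt.figure(2)
plt.imshow(I, cmap='gray')
plt.plot(coords[:, 1], coords[:, 0], 'r.')
plt.show()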