Code example #1
import math
import numpy as np
import cv2 as cv

def dirFilter2D(mSize, nBands):
    """Build nBands oriented averaging filters of size mSize x mSize."""
    filts = []
    dirs = np.zeros((2, nBands), float)  # np.float was removed in NumPy 1.24
    theta = np.arange(nBands) * math.pi / nBands
    rho = np.ones(nBands)
    X, Y = cv.polarToCart(rho, theta)
    # X = np.cos(theta)
    # Y = np.sin(theta)
    dirs[0, :] = X.ravel()  # polarToCart returns column vectors; flatten
    dirs[1, :] = Y.ravel()
    for k in range(nBands):
        ang1 = (k - 0.5) * math.pi / nBands
        ang2 = (k + 0.5) * math.pi / nBands
        theta = np.array([ang1, ang2, ang1, ang2, ang1], float)
        rho = np.array([1, 1, -1, -1, 1], float) * (mSize / 2)
        X, Y = cv.polarToCart(rho, theta)
        # X = np.cos(theta) * rho
        # Y = np.sin(theta) * rho
        X = np.round(X + mSize / 2)
        Y = np.round(Y + mSize / 2)
        Mask = np.zeros((mSize, mSize), float)
        polyVerticesTemp = np.array([X, Y], np.int32)
        polyVertices = polyVerticesTemp.reshape(2, 5)
        polyVerticesNew = polyVertices.transpose()
        Mask = cv.fillConvexPoly(Mask, polyVerticesNew, 1)
        N = float(cv.countNonZero(Mask))
        filts.append(Mask / N)

    # correct the orientation (counter-clockwise ordering)
    temp = filts[1:]
    temp.reverse()
    filt0 = filts[0]
    filts = [filt0] + temp
    return filts, dirs
Code example #2
    def _get_channel_sal_magn(self, channel):

        if self.use_numpy_fft:
            img_dft = np.fft.fft2(channel)
            magnitude, angle = cv2.cartToPolar(np.real(img_dft),
                                               np.imag(img_dft))
        else:
            img_dft = cv2.dft(np.float32(channel),
                              flags=cv2.DFT_COMPLEX_OUTPUT)
            magnitude, angle = cv2.cartToPolar(img_dft[:, :, 0],
                                               img_dft[:, :, 1])
        log_ampl = np.log10(magnitude.clip(min=1e-9))

        log_ampl_blur = cv2.blur(log_ampl, (3, 3))

        residual = np.exp(log_ampl - log_ampl_blur)

        if self.use_numpy_fft:
            real_part, imag_part = cv2.polarToCart(residual, angle)
            img_combined = np.fft.ifft2(real_part + 1j * imag_part)
            magnitude, _ = cv2.cartToPolar(np.real(img_combined),
                                           np.imag(img_combined))
        else:
            img_dft[:, :, 0], img_dft[:, :, 1] = cv2.polarToCart(residual,
                                                                 angle)
            img_combined = cv2.idft(img_dft)
            magnitude, _ = cv2.cartToPolar(img_combined[:, :, 0],
                                           img_combined[:, :, 1])

        return magnitude
Code example #3
def main():

    # Images
    img1 = cv.imread("../imgs/puente.jpg", 0)
    img2 = cv.imread("../imgs/ferrari-c.png", 0)

    # Transform
    m1, f1 = tf_complexa(img1)
    m2, f2 = tf_complexa(img2)

    # Recombine: magnitude of one image with the phase of the other
    x0, y0 = cv.polarToCart(m1, f2, angleInDegrees=False)
    x1, y1 = cv.polarToCart(m2, f1, angleInDegrees=False)
    im0 = cv.merge([x0, y0])
    im1 = cv.merge([x1, y1])

    # Inverse transform (flags must be passed by keyword; the second
    # positional argument of cv.idft is dst, not flags)
    inv0 = cv.idft(im0, flags=cv.DFT_COMPLEX_OUTPUT)
    inv1 = cv.idft(im1, flags=cv.DFT_COMPLEX_OUTPUT)

    # Combine channels for display
    r1 = cv.magnitude(inv0[:, :, 0], inv0[:, :, 1])
    r2 = cv.magnitude(inv1[:, :, 0], inv1[:, :, 1])

    # Normalize (cv.normalize takes dst before the alpha/beta range)
    r1 = cv.normalize(r1, None, 0, 255, cv.NORM_MINMAX)
    r2 = cv.normalize(r2, None, 0, 255, cv.NORM_MINMAX)

    # log scale
    # magn = cv.log(magn + 1)
    # center the spectrum
    # magn = np.fft.fftshift(magn, axes=None)

    # Display
    plt.subplot(2, 2, 1)
    plt.xticks([])
    plt.yticks([])
    plt.title("Imagem 1")
    plt.imshow(img1, cmap="gray")

    plt.subplot(2, 2, 2)
    plt.xticks([])
    plt.yticks([])
    plt.title("Imagem 2")
    plt.imshow(img2, cmap="gray")

    plt.subplot(2, 2, 3)
    plt.xticks([])
    plt.yticks([])
    plt.title("M1 + F2")
    plt.imshow(r1, cmap="gray")

    plt.subplot(2, 2, 4)
    plt.xticks([])
    plt.yticks([])
    plt.title("M2 + F1")
    plt.imshow(r2, cmap="gray")

    plt.show()
Code example #4
File: ps2.py  Project: thiagofigcosta/computer-vision
def buildImgFromAmpMeanAndPse(amp, pse):
    # ndarray.fill() returns None, so fill after copying instead of chaining
    amp_mean = amp.copy()
    amp_mean.fill(amp.mean())
    mixed = np.zeros((amp_mean.shape[0], amp_mean.shape[1], 2),
                     dtype=np.float64)
    real, imag = cv2.polarToCart(amp_mean, pse)  # compute once, not twice
    mixed[:, :, 0] = real
    mixed[:, :, 1] = imag
    return cv2.idft(mixed, flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_SCALE)
Code example #5
 def mix(self, imageToBeMixed: 'ImageModel', magnitudeOrRealRatio: float,
         phaesOrImaginaryRatio: float, mode: 'Modes') -> np.ndarray:
     """
     a function that takes ImageModel object mag ratio, phase ration 
     """
     ###
     # implement this function
     ###
     mix = np.zeros((self.imgByte.shape[0], self.imgByte.shape[1], 2),
                    'float64')
     if mode.value == "testMagAndPhaseMode":
         if (self.uniMag and imageToBeMixed.uniPh):
             real, imaginary = cv.polarToCart(
                 self.uniMagnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.uniPhase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         elif (self.uniMag):
             real, imaginary = cv.polarToCart(
                 self.uniMagnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.phase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         elif (imageToBeMixed.uniPh):
             real, imaginary = cv.polarToCart(
                 self.magnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.uniPhase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
         else:
             real, imaginary = cv.polarToCart(
                 self.magnitude * magnitudeOrRealRatio +
                 imageToBeMixed.magnitude * (1 - magnitudeOrRealRatio),
                 self.phase * (1 - phaesOrImaginaryRatio) +
                 imageToBeMixed.phase * phaesOrImaginaryRatio,
                 angleInDegrees=True)
     elif mode.value == "testRealAndImagMode":
         real = self.real * magnitudeOrRealRatio + imageToBeMixed.real * (
             1 - magnitudeOrRealRatio)
         imaginary = self.imaginary * (
             1 - phaesOrImaginaryRatio
         ) + imageToBeMixed.imaginary * phaesOrImaginaryRatio
     mix[:, :, 0], mix[:, :, 1] = real, imaginary
     invImg = cv.idft(mix, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
     invImg *= 255.0 / np.max(invImg)
     return invImg
Code example #6
    def hybridImage(self):
        #Gaussian method
        trans1 = self.myGaussian(self.img1)
        trans2 = self.img2 - self.myGaussian(self.img2)
        result = trans2 + trans1
        # result = result.astype(np.uint16)
        cv.imwrite(OUTPUT_GAUSSIAN, result)

        #FFT method
        trans1 = self.myFFT(self.img1)
        trans2 = self.myFFT(self.img2)
        h = trans1.shape[0]
        w = trans1.shape[1]

        # center coordinates
        center_w = w // 2
        center_h = h // 2

        window = np.zeros((h, w))

        for i in range(h):
            for j in range(w):
                r = ((i - center_h)**2 + (j - center_w)**2)**0.5
                window[i][j] = self.get_cv(r, SIGMA_FFT)

        # magnitude holds the amplitude and phase the phase angle; multiply
        # the amplitude by the filter window and leave the phase unchanged
        magnitude1 = cv.magnitude(trans1.real, trans1.imag)
        magnitude2 = cv.magnitude(trans2.real, trans2.imag)
        phase1 = cv.phase(trans1.real, trans1.imag)
        phase2 = cv.phase(trans2.real, trans2.imag)
        for i in range(3):
            magnitude1[:, :, i] *= window
            magnitude2[:, :, i] -= magnitude2[:, :, i] * window

        # back to the complex domain
        result1_real, result1_imag = cv.polarToCart(magnitude1, phase1)
        result2_real, result2_imag = cv.polarToCart(magnitude2, phase2)
        # print(magnitude)
        # print(phase)
        trans1.real = result1_real
        trans1.imag = result1_imag
        trans2.real = result2_real
        trans2.imag = result2_imag
        # print(trans1)

        # inverse Fourier transform
        img_back = self.myIFFT(trans1 + trans2)
        cv.imwrite(OUTPUT_FFT, img_back)
Code example #7
def polar(I, center, r, theta=(0, 360), rstep=1, thetastep=360.0 / (180 * 8)):
    # min/max radius range
    minr, maxr = r
    # min/max angle range
    mintheta, maxtheta = theta
    # height and width of the output image
    H = int((maxr - minr) / rstep + 1)
    W = int((maxtheta - mintheta) / thetastep + 1)
    O = 125 * np.ones((H, W), I.dtype)
    cx, cy = center
    h, w = I.shape[:2]  # input bounds; checking against H/W was a bug
    # polar transform
    r = np.linspace(minr, maxr, H)
    r = np.tile(r, (W, 1))
    r = np.transpose(r)
    theta = np.linspace(mintheta, maxtheta, W)
    theta = np.tile(theta, (H, 1))
    x, y = cv2.polarToCart(r, theta, angleInDegrees=True)
    # nearest-neighbour interpolation
    for i in range(H):
        for j in range(W):
            px = int(round(x[i][j]) + cx)
            py = int(round(y[i][j]) + cy)
            if (px >= 0 and px <= w - 1) and (py >= 0 and py <= h - 1):
                O[i][j] = I[py][px]
            else:
                O[i][j] = 125  # gray
    return O
Code example #8
def saliency_feature(img):
    img_orig = img
    img = cv2.resize(img, (64, 64))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # h = cv2.getOptimalDFTSize(img.shape[0])
    # w = cv2.getOptimalDFTSize(img.shape[1])
    # print "Resizing (%d, %d) to (%d, %d)" % (img.shape[0], img.shape[1], h, w)
    # h = (h - img.shape[0])/2.0
    # w = (w - img.shape[1])/2.0
    # img = cv2.copyMakeBorder(img, int(math.floor(h)), int(math.ceil(h)), int(math.floor(w)), int(math.ceil(w)), cv2.BORDER_CONSTANT, value=0)

    dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    A, P = cv2.cartToPolar(dft[:,:,0], dft[:,:,1])
    L = cv2.log(A)
    h_n = (1./3**2)*np.ones((3,3))
    R = L - cv2.filter2D(L, -1, h_n)
    S = cv2.GaussianBlur(cv2.idft(np.dstack(cv2.polarToCart(cv2.exp(R), P)), flags=cv2.DFT_REAL_OUTPUT)**2, (0,0), 8)
    S = cv2.resize(cv2.normalize(S, None, 0, 1, cv2.NORM_MINMAX), (img_orig.shape[1],img_orig.shape[0]))

    # cv2.namedWindow('tmp1', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp1', img_orig)
    # cv2.namedWindow('tmp', cv2.WINDOW_NORMAL)
    # cv2.imshow('tmp', S)
    # cv2.waitKey()

    return S
Code example #9
def re_transform(phases, amplitude):
    x, y = cv.polarToCart(amplitude, phases)
    back = cv.merge([x, y])
    back_ishift = np.fft.ifftshift(back)
    img_back = cv.idft(back_ishift)
    img_back = cv.magnitude(img_back[:,:,0], img_back[:,:,1])
    return img_back
Code example #10
def img_polar_transform(input_img,
                        center,
                        r_range,
                        theta_range=(0, 360),
                        r_step=0.5,
                        theta_step=360.0 / 90.0):
    minr, maxr = r_range
    mintheta, maxtheta = theta_range
    H = int((maxr - minr) / r_step + 1)
    W = int((maxtheta - mintheta) / theta_step + 1)
    #print(H,W,theta_step)
    output_img = np.zeros((H, W), input_img.dtype)
    x_center, y_center = center

    r = np.linspace(minr, maxr, H)
    r = np.tile(r, (W, 1))
    r = np.transpose(r)
    theta = np.linspace(mintheta, maxtheta, W)
    theta = np.tile(theta, (H, 1))
    x, y = cv2.polarToCart(r, theta, angleInDegrees=True)

    for i in range(H):
        for j in range(W):
            px = int(round(x[i, j]) + x_center)
            py = int(round(y[i, j]) + y_center)
            if (0 <= px <= 255) and (0 <= py <= 255):  # assumes a 256x256 input
                #print(px,py)
                output_img[i, j] = input_img[px, py]
    return output_img
Code example #11
    def update_pos_given_shift(self, single_marker):
        print("Calculate single")
        # Get the data of the new perceived marker
        position_current_xy, distance_current, angle_surface_current = self.get_marker_xy(
            single_marker)

        # Calculate the difference in angle for the coordinate-transform rotation
        phi = single_marker.shift_angle_trans[0]

        # Calculate the translation by calculating the coordinates according to the rotated origin
        trans_pos_xy = cv2.polarToCart(distance_current, phi)
        trans_pos_xy = (trans_pos_xy[0][0][0], trans_pos_xy[1][0][0])

        # The translation vector is the difference between the coordinates
        # relative to the base marker and those relative to the new marker
        trans_rel_base = single_marker.shift_angle_trans[1]

        # Our position in the system of the base marker, given by the new marker, can now be calculated
        # First the rotation is applied
        new_x = position_current_xy[0] * np.cos(
            phi) + position_current_xy[1] * np.sin(phi)
        new_y = position_current_xy[0] * np.sin(
            phi) + position_current_xy[1] * np.cos(phi)

        # Then the translation is performed
        resulting_x = new_x + trans_rel_base[0]
        resulting_y = new_y + trans_rel_base[1]

        # Write the resulting data into the marker
        single_marker.pos_xy = (resulting_x, resulting_y)
Code example #12
    def update_pos_shift_to_base(self, base_marker, marker_b):

        # Get the data of the new perceived marker
        position_current_xy, distance_current, angle_surface_current = self.get_marker_xy(
            marker_b)

        # Get the data of the base marker, perceived from the new position
        base_xy, dist_base, ang_base = self.get_marker_xy(base_marker)

        # Calculate the difference in angle for the coordinate-transform rotation
        phi = ang_base - angle_surface_current

        # Calculate the translation by calculating the coordinates according to the rotated origin
        trans_pos_xy = cv2.polarToCart(distance_current, phi)
        trans_pos_xy = (trans_pos_xy[0][0][0], trans_pos_xy[1][0][0])

        # The translation vector is the difference between the coordinates
        # relative to the base marker and those relative to the new marker
        trans_rel_base = np.subtract(base_xy, trans_pos_xy)

        # Our position in the system of the base marker, given by the new marker, can now be calculated
        # First the rotation is applied
        new_x = position_current_xy[0] * np.cos(
            phi) + position_current_xy[1] * np.sin(phi)
        new_y = position_current_xy[0] * np.sin(
            phi) + position_current_xy[1] * np.cos(phi)

        # Then the translation is performed
        resulting_x = new_x + trans_rel_base[0]
        resulting_y = new_y + trans_rel_base[1]

        # Write the resulting data into the marker
        marker_b.pos_xy = (resulting_x, resulting_y)
        marker_b.shift_angle_trans = (phi, (trans_rel_base))
        marker_b.aquired_at_distance = distance_current
Code example #13
def lens_distortion(pil_img, exp, scale):

    img = np.array(pil_img)

    img_c_2 = img[150:250, 100:200]

    rows, cols = img_c_2.shape[:2]

    mapy, mapx = np.indices((rows, cols), dtype=np.float32)

    mapx = 2 * mapx / (cols - 1) - 1
    mapy = 2 * mapy / (rows - 1) - 1

    r, theta = cv2.cartToPolar(mapx, mapy)

    r[r < scale] = r[r < scale]**exp

    mapx, mapy = cv2.polarToCart(r, theta)

    mapx = ((mapx + 1) * (cols - 1)) / 2
    mapy = ((mapy + 1) * (rows - 1)) / 2

    distorted = cv2.remap(img_c_2, mapx, mapy, cv2.INTER_LINEAR)

    img[150:250, 100:200] = distorted

    pil_img = Image.fromarray(img)

    return pil_img
Code example #14
def compute_motion_compensation(motion_matrix, stab_mode,
                                trimmed_mean_percentage):
    if stab_mode == 'mean':
        u = motion_matrix[:, :, 0].mean()
        v = motion_matrix[:, :, 1].mean()
    elif stab_mode == 'trimmed_mean':
        u = trim_mean(motion_matrix[:, :, 0],
                      trimmed_mean_percentage,
                      axis=None)
        v = trim_mean(motion_matrix[:, :, 1],
                      trimmed_mean_percentage,
                      axis=None)
    elif stab_mode == 'median':
        u = np.median(motion_matrix[:, :, 0])
        v = np.median(motion_matrix[:, :, 1])
    elif stab_mode == 'mode':
        # work in polar form so the mode is taken over magnitude and angle
        mags, angles = cv2.cartToPolar(motion_matrix[:, :, 0],
                                       motion_matrix[:, :, 1])
        m_mag, m_ang = mode(mags.ravel())[0], mode(angles.ravel())[0]
        u, v = cv2.polarToCart(m_mag, m_ang)
        u, v = u[0][0], v[0][0]
    else:
        # NotImplementedError is the correct exception class to raise here
        raise NotImplementedError(
            "Choose one of the implemented modes: "
            "mean, trimmed_mean, median, mode")
    return u, v
Code example #15
File: optimizer.py  Project: dkloving/Move13_Task
def arctan_unwarp(points, a, image_width, image_height):
    """
    Transforms points to remove lens distortion according to an arctan model. Cannot produce the
     identity function, but asymptotically approaches it as `a` goes toward 0

    :param points:
        ndarray of image space points
    :param a:
        real-valued scalar, tunable parameter.
    :param image_width:
        int, image width in pixels
    :param image_height:
        int, image height in pixels
    :returns:
        array of points after transformation
    """
    x = points[..., 0]
    y = points[..., 1]
    # center
    x_center = x - image_width / 2
    y_center = y - image_height / 2
    # convert to polar coords
    r_polar, theta_polar = cv2.cartToPolar(x_center, y_center)
    # make lens correction
    r_max = np.hypot(image_width, image_height) * 0.5
    r_polar = r_max * np.tan(r_polar * np.arctan(a) / r_max) / a
    # convert back to cartesian coords and un-center
    x_cart, y_cart = cv2.polarToCart(r_polar, theta_polar)
    x = np.squeeze(x_cart) + image_width / 2
    y = np.squeeze(y_cart) + image_height / 2
    # make array
    points_adjusted = np.stack([x, y], axis=-1)
    return points_adjusted
Code example #16
File: polartocart.py  Project: screamff/Hello-Python
def polar(I,
          center,
          r,
          theta=(0, 360),
          r_step=1.0,
          theta_step=360.0 / (180 * 8)):
    """
    将圆形图进行极坐标转换,将截取的某段圆环拉升为矩形
    """
    h, w = I.shape
    #r的取值范围
    min_r, max_r = r
    #theta取值范围
    min_theta, max_theta = theta
    #输出图像的宽高
    H = int((max_r - min_r) / r_step) + 1
    W = int((max_theta - min_theta) / theta_step) + 1
    out_pic = 125 * np.ones((H, W), I.dtype)
    #极坐标转换
    r = np.linspace(min_r, max_r, H)
    r = np.tile(r, (W, 1))
    r = np.transpose(r)
    theta = np.linspace(min_theta, max_theta, W)
    theta = np.tile(theta, (H, 1))
    x, y = cv2.polarToCart(r, theta, angleInDegrees=1)
    #最近邻插值
    cx, cy = center
    for i in xrange(H):
        for j in xrange(W):
            px = int(round(x[i][j]) + cx)
            py = int(round(y[i][j]) + cy)
            if ((px >= 0 and px <= w - 1)) and (py >= 0 and py <= h - 1):
                out_pic[i][j] = I[px][py]
    return out_pic
Code example #17
def img_polar_transform(input_img,
                        center=(IMAGE_SIZE / 2, IMAGE_SIZE / 2),
                        r_range=(0, int(IMAGE_SIZE / 2 * 1.414)),
                        theta_range=(0, 360),
                        r_step=1.0,
                        theta_step=360.0 / (360.0)):
    minr, maxr = r_range
    mintheta, maxtheta = theta_range
    H = int((maxr - minr) / r_step + 1)
    W = int((maxtheta - mintheta) / theta_step + 1)  # 361 for the defaults

    output_img = np.zeros((H, W), input_img.dtype)
    x_center, y_center = center

    r = np.linspace(minr, maxr, H)
    r = np.tile(r, (W, 1))
    r = np.transpose(r)
    theta = np.linspace(mintheta, maxtheta, W)
    theta = np.tile(theta, (H, 1))
    x, y = cv2.polarToCart(r, theta, angleInDegrees=True)

    for i in range(H):
        for j in range(W):
            px = int(round(x[i, j]) + x_center)
            py = int(round(y[i, j]) + y_center)
            if ((px >= 0 and px <= IMAGE_SIZE - 1)
                    and (py >= 0 and py <= IMAGE_SIZE - 1)):
                output_img[i, j] = input_img[px, py]
    return output_img
Code example #18
 def get_marker_xy(self, marker):
     distance = aruco_data.get_distance(marker)
     angle = aruco_data.get_angle_surface(marker)
     #angle = aruco_data.get_angle_to_center(marker)
     # Now we can get the dx and dy of movement
     xy = cv2.polarToCart(distance, angle)
     return tuple((xy[0][0][0], xy[1][0][0])), distance, angle
Code example #19
 def inverse_fourier_transform(self):
     plane = cv2.polarToCart(self.magnitude, self.phase)
     fourier = cv2.merge(plane)
     fourier_inv_shift = np.fft.ifftshift(fourier)
     img_back = cv2.idft(fourier_inv_shift)
     new_plane = cv2.split(img_back)
     self.img_back = cv2.magnitude(new_plane[0], new_plane[1])
Code example #20
def polar(I,
          center,
          r,
          theta=(0, 360),
          rstep=1.0,
          thetastep=360.0 / (180 * 8)):
    # min/max radius range
    minr, maxr = r
    # min/max angle range
    mintheta, maxtheta = theta
    # height and width of the output image
    H = int((maxr - minr) / rstep) + 1
    W = int((maxtheta - mintheta) / thetastep) + 1
    O = np.zeros((H, W), I.dtype)
    cx, cy = center        # cx/cy were undefined in the original
    h, w = I.shape[:2]     # as were h/w
    # polar transform
    r = np.linspace(minr, maxr, H)
    r = np.tile(r, (W, 1))
    r = np.transpose(r)
    theta = np.linspace(mintheta, maxtheta, W)
    theta = np.tile(theta, (H, 1))
    x, y = cv2.polarToCart(r, theta, angleInDegrees=True)
    # nearest-neighbour interpolation (range, not Python 2's xrange)
    for i in range(H):
        for j in range(W):
            px = int(round(x[i][j]) + cx)
            py = int(round(y[i][j]) + cy)
            if px < 0 or py < 0 or px > w - 1 or py > h - 1:
                O[i][j] = 125  # gray
            else:
                O[i][j] = I[py][px]
    return O
Code example #21
 def test_Polar2Anix(self):
     # lower_red(RGB) (208,79,113)
     # upper (212,47,56)
     # OpenCV appears to wrap the scalar inputs as 1x1 matrices here (the
     # scalar polarToCart calls elsewhere on this page assume the same)
     r, angle = cv2.cartToPolar(1, 1)
     print(r, angle)
     x, y = cv2.polarToCart(r, angle)
     print(x, y)
Code example #23
def get_center_circle_points(own_xys):

    goalxys = []
    goal_offset = -(np.pi / 8.0)
    circle_radius = radius_arena - distance_from_boundary_of_circle  # meter

    # calculate positions on a circle near the center
    for pos in own_xys:
        own_x = pos[0]
        own_y = pos[1]

        # Get a point on the circle
        distance_goalpoint = circle_radius

        # Now take the distance to the goalpoint in combination with the angle to the
        # goalpoint to make out the intersection of the straight line in between the
        # car and that circle
        angle_center = np.arctan2(own_y, own_x)

        # Change the angle to be in front of the vehicle like the carrot on a stick
        angle_goalpoint = angle_center + goal_offset

        # Next calculate that new point
        goal_xy = cv2.polarToCart(distance_goalpoint, angle_goalpoint)
        goal_xy = [goal_xy[0][0][0], goal_xy[1][0][0]]

        # If the distance of that new goalpoint is too far from our position

        goalxys.append(goal_xy)

    return goalxys
Code example #24
def bin_sum(h_var, ventana):
    vector = np.arange(360)
    vector.resize(1, 360)
    X, Y = cv2.polarToCart(h_var, vector * (np.pi / 180))
    X_nueva = np.zeros((1, 360))
    Y_nueva = np.zeros((1, 360))
    for i in range(0, 360):
        if (i + ventana - 1) > 360:
            topeX = (i + ventana - 1) - 360
            topeY = (i + ventana - 1) - 360
            # reshape sizes must be positive: the slice X[0, i:360] holds
            # 360 - i elements (the original used i - 360, a bug)
            X1 = X[0, i:360].reshape(360 - i, 1)
            X2 = X[0, 0:topeX].reshape(topeX, 1)
            if X1.shape[0] == 0:
                XX = X2
            elif X2.shape[0] == 0:
                XX = X1
            else:
                XX = np.append(X1, X2)
            Y1 = Y[0, i:360].reshape(360 - i, 1)
            Y2 = Y[0, 0:topeY].reshape(topeY, 1)
            if Y1.shape[0] == 0:
                YY = Y2
            elif Y2.shape[0] == 0:
                YY = Y1
            else:
                YY = np.append(Y1, Y2)
        else:
            XX = X[0, i:i + ventana - 1]
            YY = Y[0, i:i + ventana - 1]
        pond = np.linspace(0, 1, ventana - 1)
        X_nueva[0, i] = np.sum(XX * pond) / ventana
        Y_nueva[0, i] = np.sum(YY * pond) / ventana
    # convert the smoothed bins back to polar form (moved out of the loop)
    rho, theta = cv2.cartToPolar(X_nueva, Y_nueva)
    return rho, theta
Code example #25
def onChanged(x):
    exp = x / 10
    # build the mapping arrays ---②
    mapy, mapx = np.indices((rows, cols), dtype=np.float32)

    # switch from top-left-origin coordinates to center-origin coordinates
    # normalized to -1..1 ---③
    mapx = (2 * mapx - cols) / cols
    mapy = (2 * mapy - rows) / rows

    # convert Cartesian to polar coordinates ---④
    r, theta = cv2.cartToPolar(mapx, mapy)

    # apply the zoom exponent only inside the distortion region ---⑤
    r[r < scale] = r[r < scale]**exp

    # convert polar back to Cartesian coordinates ---⑥
    mapx, mapy = cv2.polarToCart(r, theta)

    # switch from center-origin back to top-left-origin coordinates ---⑦
    mapx = ((mapx + 1) * cols) / 2
    mapy = ((mapy + 1) * rows) / 2

    # remap
    distorted = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
    cv2.imshow('lens', distorted)
Code example #26
    def car_to_field(self, rvec, tvec):
        """Transform the car's location and direction to field coordinates, shift to field plane.

        :param rvec: Rotation vector (as output from aruco.estimatePoseBoard)
        :param tvec: Translation vector (as output from aruco.estimatePoseBoard)
        :return: loc, dir - Location in field coordinates, direction vector (normalized) in field coords.
        """
        cam_mat = cam_params.logitech_matrix
        cam_dist = cam_params.logitech_dist_coeffs
        mat = np.frombuffer(self.master.perspective_matrix.get_obj()).reshape(
            (3, 3))
        # proceed once the perspective matrix has been updated by StaticProcessor
        if mat.any():
            obj_points = np.array([[0, 0, 0], [0, 1, 0]], np.float32)
            pic_points, _ = cv2.projectPoints(obj_points, rvec, tvec, cam_mat,
                                              cam_dist)
            cx, cy = self.master.center.coords()
            lens, angles = cv2.cartToPolar(cx - pic_points[:, 0, 0],
                                           cy - pic_points[:, 0, 1])
            lens *= (1 - 85. / H_CAM_MM)
            pic_points = np.array(cv2.polarToCart(lens, angles)).T
            pic_points = pic_points[0, :, None]  # Cause OpenCV is ...
            pic_points = np.array((cx, cy)) - pic_points
            field_points = cv2.perspectiveTransform(pic_points, mat)
            loc = field_points[0, 0]
            dir = field_points[1, 0] - field_points[0, 0]
            cv2.normalize(dir, dir, 70 * pixres, norm_type=2)
            return loc, dir
        else:
            return None, None
Code example #27
 def get_fov_triangle(self, xy, heading, fov_angle, distance):
     '''
     Returns the field of view as a triangle based on the given position
     and heading.
     '''
     limit_left = heading - np.deg2rad(fov_angle / 2.0)
     limit_right = heading + np.deg2rad(fov_angle / 2.0)
     
     a = xy  # Point a is the latest known position
     b = cv2.polarToCart(distance, limit_left)  # Point b is distance in front and half of fov to the left
     c = cv2.polarToCart(distance, limit_right)  # Point c is distance in front and half of fov to the right
     
     # Needed extraction of the values from the polarToCart method
     # and adding of a as the origin 
     b = b[0][0][0] + a[0], b[1][0][0] + a[1]
     c = c[0][0][0] + a[0], c[1][0][0] + a[1]
     
     return Triangle(to_point(a), to_point(b), to_point(c))
Code example #28
def project_pos(xy, current_heading, distance=2):
    '''
    Takes the position xy and the current heading and returns the
    projected future xy at the given distance.
    '''

    x, y = cv2.polarToCart(distance, current_heading)

    return [x[0][0] + xy[0], y[0][0] + xy[1]]
Code example #29
    def _get_channel_sal_magn(self, channel):
        """Returns the log-magnitude of the Fourier spectrum

            This method calculates the log-magnitude of the Fourier spectrum
            of a single-channel image. This image could be a regular grayscale
            image, or a single color channel of an RGB image.

            :param channel: single-channel input image
            :returns: log-magnitude of Fourier spectrum
        """
        # do FFT and get log-spectrum
        if self.use_numpy_fft:
            img_dft = np.fft.fft2(channel)
            magnitude, angle = cv2.cartToPolar(np.real(img_dft),
                                               np.imag(img_dft))
        else:
            img_dft = cv2.dft(np.float32(channel),
                              flags=cv2.DFT_COMPLEX_OUTPUT)
            magnitude, angle = cv2.cartToPolar(img_dft[:, :, 0],
                                               img_dft[:, :, 1])

        # get log amplitude
        log_ampl = np.log10(magnitude.clip(min=1e-9))

        # blur log amplitude with avg filter
        log_ampl_blur = cv2.blur(log_ampl, (3, 3))

        # residual
        residual = np.exp(log_ampl - log_ampl_blur)

        # back to cartesian frequency domain
        if self.use_numpy_fft:
            real_part, imag_part = cv2.polarToCart(residual, angle)
            img_combined = np.fft.ifft2(real_part + 1j*imag_part)
            magnitude, _ = cv2.cartToPolar(np.real(img_combined),
                                           np.imag(img_combined))
        else:
            img_dft[:, :, 0], img_dft[:, :, 1] = cv2.polarToCart(residual,
                                                                 angle)
            img_combined = cv2.idft(img_dft)
            magnitude, _ = cv2.cartToPolar(img_combined[:, :, 0],
                                           img_combined[:, :, 1])

        return magnitude
Code example #30
    def remove_periodic_interference(self, image):
        height, width = image.shape
        # min and max values of image
        min_v = np.amin(image)
        max_v = np.amax(image)
        mean_v = int(np.mean(image))
        # adding padding to image
        padded_height = math.ceil(math.log2(height))
        padded_height = int(math.pow(2, padded_height))
        padded_width = math.ceil(math.log2(width))
        padded_width = int(math.pow(2, padded_width))
        padded_image = np.full((padded_height, padded_width), mean_v, dtype=np.uint8)
        padded_image[0:height, 0:width] = image
        # convert image to float and save as complex output
        dft = cv2.dft(np.float32(padded_image), flags=cv2.DFT_COMPLEX_OUTPUT)
        # shift of origin from upper left corner to center of image
        dft_shifted = np.fft.fftshift(dft)
        # magnitude and phase images
        mag, phase = cv2.cartToPolar(dft_shifted[:, :, 0], dft_shifted[:, :, 1])
        # extract spectrum
        spectrum = np.log(mag) / 20
        min, max = np.amin(spectrum, (0, 1)), np.amax(spectrum, (0, 1))
        # threshold spectrum to find bright spots
        thresh_spec = (255 * spectrum).astype(np.uint8)
        thresh_spec = cv2.threshold(thresh_spec, 155, 255, cv2.THRESH_BINARY)[1]
        # cover center rows of thresh with black
        yc = padded_height // 2
        cv2.line(thresh_spec, (0, yc), (padded_width - 1, yc), 0, 5)
        # get y coordinates bright spots
        bright_spots = np.column_stack(np.nonzero(thresh_spec))
        # mask
        mask = thresh_spec.copy()
        for b in bright_spots:
            y = b[0]
            cv2.line(mask, (0, y), (padded_width - 1, y), 255, 5)
        # apply mask to magnitude
        mag[mask != 0] = 0
        # convert new magnitude and old phase into cartesian real and imaginary components
        real, imag = cv2.polarToCart(mag, phase)
        # combine cart comp into one compl image
        back = cv2.merge([real, imag])
        # shift origin from center to upper left corner
        back_ishift = np.fft.ifftshift(back)
        # do idft saving as complex output
        img_back = cv2.idft(back_ishift)
        # combine complex components into original image again
        img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
        # crop to original size
        img_back = img_back[0:height, 0:width]
        # re-normalize to 8 bits in the range of the original
        notched = cv2.normalize(img_back, None, alpha=min_v, beta=max_v,
                                norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

        return notched
Code example #31
def computeSaliencyImg(img):
    """
    This function computes the saliency map of a given image per the algorithm
    put forth in the work of Xiaodi Hou et al.

    params:
        img (int8 image): the image to compute the saliency map for.
    
    returns:
        saliencyMap (int8 image): the computed saliency map
    """
    # compute dft and shift
    fImage = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
    fImageShifted = np.fft.fftshift(fImage)

    # split into magnitude and phase (polar form); note that cartToPolar
    # returns magnitude/phase, not real/imaginary parts as the names suggest
    real, imaginary = cv2.cartToPolar(fImageShifted[:, :, 0],
                                      fImageShifted[:, :, 1])

    # compute log of the magnitude image
    logImage = np.log(real)

    # compute blurred version of the log-magnitude image
    blurKernel = np.ones((7, 7), np.float32) / (7 * 7)
    logImageBlured = cv2.filter2D(logImage, -1, blurKernel)

    # compute spectral residue image on log scale
    spectralResidualImageLog = logImage - logImageBlured

    # undo the log scaling so the magnitude component matches the
    # non-log scaling of the phase component
    spectralResidualImage = np.exp(spectralResidualImageLog)

    # move back to cartesian complex form
    fImageShifted[:, :, 0], fImageShifted[:, :, 1] = cv2.polarToCart(
        spectralResidualImage, imaginary)

    # compute necessary shift for inverse DFT
    fInvShift = np.fft.ifftshift(fImageShifted)

    # perform inverse DFT
    saliencyMapComplex = cv2.idft(fInvShift)**2

    # find real part of saliency map from obtained DFT results
    saliencyMap = cv2.magnitude(saliencyMapComplex[:, :, 0],
                                saliencyMapComplex[:, :, 1])

    # normalize around the maximum value (result between 0-1)
    maxVal = np.max(saliencyMap)
    saliencyMap = saliencyMap / maxVal

    # scale image up to 8-bit range (0-255)
    saliencyMap = saliencyMap * 255

    return saliencyMap
Code example #32
File: saliency.py  Project: xenron/sandbox-da-python
    def __GetChannelSalMagn(self, channel):
        # do FFT and get log-spectrum
        if self.useNumpyFFT:
            imgDFT = np.fft.fft2(channel)
            magnitude,angle = cv3.cartToPolar(np.real(imgDFT), np.imag(imgDFT))
        else:
            imgDFT = cv3.dft(np.float32(channel), flags=cv3.DFT_COMPLEX_OUTPUT)
            magnitude,angle = cv3.cartToPolar(imgDFT[:,:,0], imgDFT[:,:,1])

        # get log amplitude
        logAmpl = np.log10(magnitude.clip(min=1e-9))

        # blur log amplitude with avg filter
        logAmplBlur = cv3.blur(logAmpl, (3,3))

        # residual
        residual = np.exp(logAmpl - logAmplBlur)

        # back to cartesian frequency domain
        if self.useNumpyFFT:
            realPart,imagPart = cv3.polarToCart(residual, angle)
            imgCombined = np.fft.ifft2(realPart + 1j*imagPart)
            magnitude,_ = cv3.cartToPolar(np.real(imgCombined), np.imag(imgCombined))
        else:
            imgDFT[:,:,0],imgDFT[:,:,1] = cv3.polarToCart(residual, angle)
            imgCombined = cv3.idft(imgDFT)
            magnitude,_ = cv3.cartToPolar(imgCombined[:,:,0], imgCombined[:,:,1])

        # magnitude = magnitude - np.mean(magnitude)

        # if self.gaussKernel is not None:
        #     magnitude = cv3.GaussianBlur(np.float32(magnitude), self.gaussKernel, sigmaX=8, sigmaY=0)

        # magnitude = magnitude**2
        # magnitude = np.float32(magnitude)/np.max(magnitude)

        return magnitude
Code example #33
File: render.py  Project: jarrahl/skyscreen
	def _make_mapping_matrix(self):
		paintable_area = 0.95 * (self._window_size / 2.0 - self._annulus)
		angles = np.zeros((Screen.screen_vane_count, Screen.screen_max_magnitude))
		magnitudes = np.zeros((Screen.screen_vane_count, Screen.screen_max_magnitude))
		for angle in range(Screen.screen_vane_count):
			for mag in range(Screen.screen_max_magnitude):
				render_angle = (angle+0.5) / float(Screen.screen_vane_count) * 2.0 * 3.14159
				render_mag = self._annulus + (Screen.screen_max_magnitude - mag) / \
											float(Screen.screen_max_magnitude) * paintable_area
				angles[angle, mag] = render_angle
				magnitudes[angle, mag] = render_mag
		cols, rows = cv2.polarToCart(magnitudes, angles)
		cols = np.round((cols + self._window_size / 2)).astype(np.int32)
		rows = np.round((rows + self._window_size / 2)).astype(np.int32)

		return cols, rows
Code example #34
def polar_transformation(image, origin, size, distance_range, angle_range=(0, 2*np.pi)):
	h, w = image.shape[0:2]
	distance = np.linspace(distance_range[0], distance_range[1], size[0])
	angle = np.linspace(angle_range[0],angle_range[1],size[1])
	angle_arr, distance_arr = np.meshgrid(angle, distance)

	xv, yv = cv2.polarToCart(distance_arr, angle_arr)
	xv += origin[1]
	yv += origin[0]

	yv = np.clip(yv, 0, h - 1)
	xv = np.clip(xv, 0, w - 1)

	if len(image.shape) == 3:
		polar_image = image[yv.astype('int32'), xv.astype('int32'), :]
	else:
		polar_image = image[yv.astype('int32'), xv.astype('int32')]
	return polar_image
Code example #35
File: restore.py  Project: ajdroid/DIP15
print(dft1.shape)
dft_shift1 = np.fft.fftshift(dft1)
dftmag1, dftphase1 = cv2.cartToPolar(dft_shift1[:,:,0], dft_shift1[:,:,1])
invdftmag = np.ones(dftmag1.shape)
invdftphase = np.zeros(dftphase1.shape)


for i in range(-240, 241):
	for j in range(-240, 241):
		# 5/6 must be float division; under Python 2 it silently became 0
		if np.exp(-0.0025 * (i * i + j * j)**(5 / 6)) >= 0.01:
			invdftmag[i, j] = np.exp(0.0025 * (i * i + j * j)**(5 / 6))
			
print(invdftmag)
dftmag2 = invdftmag*dftmag1
dftphase2 = dftphase1 - invdftphase

dft_shift1[:,:,0], dft_shift1[:,:,1] = cv2.polarToCart(dftmag2, dftphase2)

f_ishift = np.fft.ifftshift(dft_shift1)
img2 = cv2.idft(f_ishift)
img2 = cv2.magnitude(img2[:,:,0],img2[:,:,1])
print(img1)
print(img2 / np.max(img2) * 255)

plt.subplot(121),plt.imshow(img1, cmap = 'gray')
plt.title('Input'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(np.uint8(img2/np.max(img2)*255), cmap = 'gray')
plt.title('Restored'), plt.xticks([]), plt.yticks([])

plt.show()
Code example #36
def animate(i):
    global opts, framegrabber, frames, params, flowStartFrame
    global roi, flowMask, lmask, rmask, tmask, bmask
    global flowVals, times, history
    global logfile
    global codes

    update = [imdisp, foedisp, q_foe, b_latdiv, b_verdiv, b_ttc]    
    t1 = time.time()
    # ------------------------------------------------------------
    # Compute optical flow
    # ------------------------------------------------------------    
    # grab the current frame, update indices
    clrframe = next(framegrabber)  # Python 3: next(), not .next()
    currFrame = cv2.cvtColor(clrframe, cv2.COLOR_BGR2GRAY)
    framenum = i + flowStartFrame
    times[i] = framenum

    prvs = sum(frames) / float(opts.frameavg)
    nxt = (sum(frames[1:]) + currFrame) / float(opts.frameavg)
    flow = cv2.calcOpticalFlowFarneback(prvs[startY:stopY,startX:stopX]
                                        , nxt[startY:stopY,startX:stopX]
                                        , **params)
    mag, angle = cv2.cartToPolar(flow[...,0], flow[...,1])

    # ------------------------------------------------------------
    # Remove outlier flow
    # ------------------------------------------------------------    
    if opts.nofilt:
        thresh_mask = flowMask
    else:
        # clean up flow estimates, remove outliers
        # thresh_mask = threshold_local(mag, shape=(20,20), llim=0, ulim=0.96)
        # global_mask = threshold_global(mag, llim=0.00, ulim=0.99)[0]
        global_mask = np.ones_like(flowMask)
        lthresh = 1e-3
        thresh_mask = (mag > lthresh) & global_mask
        flow[~thresh_mask] = 0
        mag[~thresh_mask] = 0

    # ------------------------------------------------------------
    # estimate the location of the FoE
    # ------------------------------------------------------------    
    # S = generic2dFilter(angle, (foeW, foeW), matchWin, step=dt, padded=True)
    # participants = generic2dFilter(thresh_mask, (foeW, foeW), np.sum
    #                                , padded=True, step=dt)
    # S /= participants
    # foe_y_subsearch, foe_x_subsearch = np.unravel_index(np.argmin(S), S.shape)
    # foe_y, foe_x = startY + foeW//2 + foe_y_subsearch*dt , startX + foeW//2 + foe_x_subsearch*dt
    # foe_x, foe_y = FindFoE(flow[...,0][foeW//2:-foeW//2],flow[...,1][foeW//2:-foeW//2])
    # print
    # print foe_x, foe_y
    # foe_x, foe_y = startX + maskW//2, startY + maskW//2
    foe_x, foe_y = 183, 80
    p0, p1 = (foe_x-foeW//2, foe_y-foeW//2), (foe_x+foeW//2, foe_y+foeW//2)
    # confidence= participants[foe_y_subsearch, foe_x_subsearch] / (foeW**2)
    confidence=0
    divTemplates = generate2dTemplates(p0, p1, thresh_mask.shape, thresh_mask)
    foe_tmask, foe_bmask, foe_lmask, foe_rmask = divTemplates
    foeSlice_y, foeSlice_x = slice(p0[1],p1[1]+1), slice(p0[0],p1[0]+1)

    # ------------------------------------------------------------
    # estimate divergence parameters and ttc for this frame
    # ------------------------------------------------------------
    xDiv = (np.sum(flow[rmask,0]) - np.sum(flow[lmask,0])) \
           /(np.sum((lmask|rmask) & thresh_mask) + EPS)
    yDiv = (np.sum(flow[tmask,1]) - np.sum(flow[bmask,1])) \
           /(np.sum((tmask|bmask) & thresh_mask) + EPS)
    xDiv_foe = (np.sum(flow[foe_rmask,0]) - np.sum(flow[foe_lmask,0])) \
               /(np.sum(foe_lmask|foe_rmask) + EPS)
    yDiv_foe = (np.sum(flow[foe_tmask, 1]) - np.sum(flow[foe_bmask, 1])) \
               /(np.sum(foe_tmask|foe_bmask) + EPS)
    ttc = 2/(xDiv + yDiv + EPS)
    history[:, :-1] = history[:, 1:]; history[:, -1] = (xDiv,yDiv,ttc)

    # ------------------------------------------------------------    
    # use estimation history to estimate new values
    # ------------------------------------------------------------
    if i > history.shape[1]:
        flowVals[:-1, i] = np.sum(history[:-1]*w_forget, axis=1)/sum(w_forget)
        # flowVals[-1, i] = np.median(history[-1,-3:])

        # m, y0, _, _, std = stats.linregress(np.arange(5)
        #                                     , flowVals[-1,i-4:i+1]*w_forget[:5]/sum(w_forget[:5]))
        # flowVals[2, i] =  m*times[i] + y0
        # flowVals[2, i] = np.median(history[-1, -3:])
        flowVals[2, i] = stats.trim_mean(history[-1, -5:],0.4)
    else:
        flowVals[:, i] = (xDiv,yDiv,ttc)

    # ------------------------------------------------------------            
    # write out results
    # ------------------------------------------------------------
    t2 = time.time()
    out = (framenum, xDiv, yDiv, ttc, 100.*confidence, t2-t1)
    if opts.log:
        print(','.join(map(str, out)), file=logfile)
    if not opts.quiet:
        sys.stdout.write("\r%4d %+6.2f %+6.2f %6.2f %6.2f %6.2f" % out)
        sys.stdout.flush()

    # ------------------------------------------------------------
    # update figure
    # ------------------------------------------------------------    
    b_latdiv.set_height(flowVals[0,i])
    b_verdiv.set_height(flowVals[1,i])
    b_ttc.set_height(flowVals[2,i])
    foedisp.set_data(clrframe[foeSlice_y, foeSlice_x, ::-1].copy())
    # clrframe[mag <= lthresh, :] = codes.colors[0][::-1]
    # clrframe[~global_mask, :] = codes.colors[-1][::-1]
    cv2.rectangle(clrframe, p0, p1, color=(0,255,0))
    cv2.rectangle(clrframe, p0, (foe_x, foe_y+foeW//2), color=(255,0,0))
    if opts.vis == "color_overlay":
        cf.colorFlow(flow, clrframe[...,::-1]
                     , slice(startX,stopX), slice(startY,stopY), thresh_mask)
        dispim = clrframe[..., ::-1]
    elif opts.vis == "color":
        dispim = cf.flowToColor(flow)
    elif opts.vis == "quiver":
        update.append(q_img) # add this object to those that are to be updated
        q_img.set_UVC(flow[flow_strides, flow_strides, 0]
                      , flow[flow_strides, flow_strides, 1]
                      , (mag[flow_strides, flow_strides] \
                         *255/(np.max(mag)-np.min(mag)+EPS)))
        dispim = clrframe[..., ::-1]
    imdisp.set_data(dispim)

    unitmag = 2*np.ones(foedisp.get_size())
    foeKern = generateFoEkernel(foeW)
    foeKern[foeW//2, foeW//2] = angle[foe_y,foe_x]
    sim = (foeKern-angle[foeSlice_y,foeSlice_x])**2
    X, Y = cv2.polarToCart(unitmag, angle[foeSlice_y, foeSlice_x].astype(float))
    q_foe.set_UVC(X[1:-1:2, 1:-1:2], Y[1:-1:2, 1:-1:2]
                  , (sim[1:-1:2, 1:-1:2] \
                     *255/(np.max(sim)-np.min(sim)+EPS)))

    # shift the frame buffer
    frames[:-1] = frames[1:]; frames[-1] = currFrame

    return update
Code example #37
File: neuron.py  Project: napratin/nap
    def __init__(
        self,
        numNeurons=1000,
        timeNow=0.0,
        neuronTypes=[Neuron],
        bounds=default_bounds,
        neuronLocations=None,
        distribution=default_distribution,
        **kwargs
    ):
        self.id = Population.id_ctr
        Population.id_ctr += 1

        self.numNeurons = numNeurons
        self.timeNow = timeNow
        self.neuronTypes = neuronTypes
        self.bounds = bounds
        self.center = (self.bounds[0] + self.bounds[1]) / 2
        self.distribution = distribution
        self.isConnected = False
        self.plotColor = population_plot_colors[self.id % len(population_plot_colors)]  # [graph]
        self.inhibitoryConnectionColor = inhibitory_connection_color  # [graph]

        self.logger = logging.getLogger(
            self.__class__.__name__
        )  # we could use "{}.{}".format(self.__class__.__name__, self.id) instead, but that'll create separate loggers for each Population
        self.logger.info("Creating {}".format(self))
        self.logger.debug("Bounds: x: {}, y: {}, z: {}".format(self.bounds[:, 0], self.bounds[:, 1], self.bounds[:, 2]))

        # * Designate neuron locations
        if neuronLocations is not None:
            self.neuronLocations = neuronLocations
        else:
            self.neuronLocations = []
            if isinstance(self.distribution, MultivariateUniform):
                # NOTE self.distribution has to be a 3-channel MultivariateUniform, even if the third channel is a constant (low=high)
                self.neuronLocations = np.column_stack(
                    [
                        np.random.uniform(self.distribution.lows[0], self.distribution.highs[0], self.numNeurons),
                        np.random.uniform(self.distribution.lows[1], self.distribution.highs[1], self.numNeurons),
                        np.random.uniform(self.distribution.lows[2], self.distribution.highs[2], self.numNeurons),
                    ]
                )
                # self.logger.debug("MultivariateUniform array shape: {}".format(self.neuronLocations.shape))
            elif isinstance(self.distribution, MultivariateNormal):
                # self.logger.debug("Distribution: mu: {}, cov: {}".format(self.distribution.mu, self.distribution.cov))  # ugly
                self.neuronLocations = np.random.multivariate_normal(
                    self.distribution.mu, self.distribution.cov, self.numNeurons
                )
            elif isinstance(self.distribution, SymmetricNormal):
                thetas = np.random.uniform(pi, -pi, self.numNeurons)  # symmetric in any direction around Z axis
                rads = np.random.normal(
                    self.distribution.mu, self.distribution.sigma, self.numNeurons
                )  # varies radially
                xLocs, yLocs = cv2.polarToCart(rads, thetas)
                zLocs = np.repeat(np.float32([self.distribution.center[2]]), self.numNeurons).reshape(
                    (self.numNeurons, 1)
                )  # constant z, repeated as a column vector
                # self.logger.debug("SymmetricNormal array shapes:- x: {}, y: {}, z: {}".format(xLocs.shape, yLocs.shape, zLocs.shape))
                self.neuronLocations = np.column_stack(
                    [self.distribution.center[0] + xLocs, self.distribution.center[1] + yLocs, zLocs]
                )  # build Nx3 numpy array
            elif isinstance(self.distribution, SymmetricLogNormal):
                thetas = np.random.uniform(pi, -pi, self.numNeurons)  # symmetric in any direction around Z axis
                rads = np.random.lognormal(
                    self.distribution.mu, self.distribution.sigma, self.numNeurons
                )  # varies radially
                xLocs, yLocs = cv2.polarToCart(rads, thetas)
                zLocs = np.repeat(np.float32([self.distribution.center[2]]), self.numNeurons).reshape(
                    (self.numNeurons, 1)
                )  # constant z, repeated as a column vector
                # self.logger.debug("SymmetricLogNormal array shapes:- x: {}, y: {}, z: {}".format(xLocs.shape, yLocs.shape, zLocs.shape))
                self.neuronLocations = np.column_stack(
                    [self.distribution.center[0] + xLocs, self.distribution.center[1] + yLocs, zLocs]
                )  # build Nx3 numpy array
            else:
                raise ValueError("Unknown distribution type: {}".format(type(self.distribution)))
            # TODO Include (non-central) F distribution (suitable for rods)

        # Clip (clamp) neuron locations that are outside bounds
        np.clip(self.neuronLocations[:, 0], self.bounds[0, 0], self.bounds[1, 0], out=self.neuronLocations[:, 0])
        np.clip(self.neuronLocations[:, 1], self.bounds[0, 1], self.bounds[1, 1], out=self.neuronLocations[:, 1])
        # print "Out-of-bounds neuron locations:", [loc for loc in self.neuronLocations if not ((self.bounds[0, 0] <= loc[0] <= self.bounds[1, 0]) and (self.bounds[0, 1] <= loc[1] <= self.bounds[1, 1]))]  # [debug]

        # print "Neuron locations:\n", self.neuronLocations  # [debug]

        # * Create neurons
        self.neurons = self.numNeurons * [None]
        self.neuronPlotColors = self.numNeurons * [None]
        for i in range(self.numNeurons):
            self.neurons[i] = random.choice(self.neuronTypes)(self.neuronLocations[i], self.timeNow, **kwargs)
            self.neuronPlotColors[i] = self.neurons[i].plotColor

        # * Build spatial index using quadtree (assuming neurons are roughly in a layer)
        boundingRect = (self.bounds[0, 0], self.bounds[0, 1], self.bounds[1, 0], self.bounds[1, 1])
        self.qtree = QuadTree(self.neurons, depth=int(log(self.numNeurons, 2)), bounding_rect=boundingRect)
Code example #38
        image_to_analyze = cv2.imread('/home/frederik/pcl/images/perspective_result.jpeg', 0)        

        
        
        src_f = np.array(image_to_analyze, dtype=np.float32)
        src_f /= 255.
        
        dftl = cv2.dft(src_f,flags = cv2.DFT_COMPLEX_OUTPUT)
        da, fa = cv2.split(dftl)
        ma, pa = cv2.cartToPolar(da, fa)
        ma = np.fft.fftshift(ma)
 
        result = m * ma
                       
        # shift back before the inverse transform (ifftshift is the exact
        # inverse; for even-sized images fftshift coincides with it)
        ma = np.fft.ifftshift(result)
        d2, f2 = cv2.polarToCart(ma, pa)

        # `d` was undefined in the original; d2/f2 carry the filtered result
        test = np.zeros((d2.shape[0], d2.shape[1], 2), dtype=np.float32)
        test[:, :, 0] = d2
        test[:, :, 1] = f2

        img_f = cv2.dft(test, flags=cv2.DFT_INVERSE + cv2.DFT_SCALE + cv2.DFT_REAL_OUTPUT)

                
        cv2.imshow("Filter ", img_f )        

        
        #m = m * im
        #cv2.imshow("Magnitude_after", np.uint8( 20*np.log(m) ) )
        #cv2.imshow("Filter", np.uint8(im*255))
        
Code example #39
def dirFilter2D(mSize, nBands, flag=1):
    # `flag` was undefined in the original snippet; it is exposed here as a
    # parameter: 0 generates triangular sections, anything else rectangles
    filts = []
    dirs = np.zeros((2, nBands), float)  # np.float was removed in NumPy 1.24
    theta = np.arange(nBands) * math.pi / nBands
    rho = np.ones(nBands)
    X, Y = cv.polarToCart(rho, theta)
    # X = np.cos(theta)
    # Y = np.sin(theta)
    dirs[0, :] = X.ravel()  # polarToCart returns column vectors; flatten
    dirs[1, :] = Y.ravel()
    for k in range(nBands):
        ang1 = (k - 0.5) * math.pi / nBands
        ang2 = (k + 0.5) * math.pi / nBands
        theta = np.array([ang1, ang2, ang1, ang2, ang1], float)
        if flag == 0:
            # triangular section generation
            Ang1 = k * math.pi / nBands
            Ang2 = (k + 1) * math.pi / nBands
            Theta = np.array([Ang1, Ang2], float)
            Rho1 = np.array([1, 1], float) * math.floor(mSize / 2)
            # xCor, yCor = cv.polarToCart(Rho1, Theta)
            x = Rho1 * np.cos(Theta) + math.ceil(mSize / 2)
            y = Rho1 * np.sin(Theta) + math.ceil(mSize / 2)

            Mask1 = np.zeros((mSize, mSize), float)
            polyVerticesTemp = np.array(
                np.round([[x[0], y[0]], [x[1], y[1]], [mSize / 2, mSize / 2]]),
                np.int32)
            Mask1 = cv.fillConvexPoly(Mask1, polyVerticesTemp, 1)

            Rho2 = np.array([-1, -1], float) * math.floor(mSize / 2)
            x = Rho2 * np.cos(Theta) + math.ceil(mSize / 2)
            y = Rho2 * np.sin(Theta) + math.ceil(mSize / 2)

            Mask2 = np.zeros((mSize, mSize), float)
            polyVerticesTemp = np.array(
                np.round([[x[0], y[0]], [x[1], y[1]], [mSize / 2, mSize / 2]]),
                np.int32)
            Mask2 = cv.fillConvexPoly(Mask2, polyVerticesTemp, 1)

            Mask = np.logical_or(Mask1, Mask2)  # np, not the undefined `sc`
            Mask = Mask.astype(float)
        else:
            # rectangle generation
            rho = np.array([1, 1, -1, -1, 1], float) * (mSize / 2)
            X, Y = cv.polarToCart(rho, theta)
            # X = np.cos(theta) * rho
            # Y = np.sin(theta) * rho
            X = np.round(X + mSize / 2)
            Y = np.round(Y + mSize / 2)
            Mask = np.zeros((mSize, mSize), float)
            polyVerticesTemp = np.array([X, Y], np.int32)
            polyVertices = polyVerticesTemp.reshape(2, 5)
            polyVerticesNew = polyVertices.transpose()
            Mask = cv.fillConvexPoly(Mask, polyVerticesNew, 1)

        # show the mask and its support size (as in the original)
        N = float(cv.countNonZero(Mask))
        plt.imshow(Mask)
        plt.title(N)
        plt.show()

        # the original appended only in the rectangle branch, and appended
        # the unnormalized Mask a second time; append the normalized mask
        # once per band instead
        filts.append(Mask / N)

    return filts, dirs
Code example #40
def get_limb_pts(eye_img, phi=20, angle_step=1, debug_index=False):
    
    polar_img_w = 360 // angle_step                                     # Polar image has one column per angle of interest
    phi_range_1 = ((90 - phi) // angle_step, (90 + phi) // angle_step)  # Ranges of angles to be ignored (too close to lids)
    phi_range_2 = ((270 - phi) // angle_step, (270 + phi) // angle_step)  # (// keeps these integral for use as indices)
    
    eye_img_grey = cv2.cvtColor(eye_img, cv2.COLOR_BGR2GRAY)      # Do BGR-grey
    eye_img_grey = cv2.medianBlur(eye_img_grey, 5)
    
    # Scale to fixed size image for re-using transform matrix
    scale = eye_img.shape[0] / float(__fixed_width)
    img_fixed_size = cv2.resize(eye_img_grey, (__fixed_width, __fixed_width))
    
    # Transform image into polar coords and blur
    img_polar = linpolar(img_fixed_size, trans_w=polar_img_w, trans_h=__fixed_width // 2)
    img_polar = cv2.GaussianBlur(img_polar, (5, 5), 0)
    
    # Take the segment between min & max radii and filter with Gabor kernel
    img_polar_seg = img_polar[__min_limb_r:__max_limb_r, :]
    filter_img = cv2.filter2D(img_polar_seg, -1, __gabor_kern)
    
    # Black out ignored angles
    filter_img.T[ phi_range_1[0] : phi_range_1[1] ] = 0
    filter_img.T[ phi_range_2[0] : phi_range_2[1] ] = 0

    # In polar image, x <-> theta, y <-> magnitude         
    pol_ys = np.argmax(filter_img, axis=0)                      # Take highest filter response as limbus points
    pol_xs = np.arange(filter_img.shape[1])[pol_ys > 0]
    mags = (pol_ys + __min_limb_r)[pol_ys > 0]
    thts = np.radians(pol_xs * angle_step)

    # Translate each point back into fixed img coords
    xs, ys = cv2.polarToCart(mags.astype(float), thts)
    xs = (xs + __fixed_width / 2) * scale                       # Shift and scale cart. coords back to original eye-ROI coords
    ys = (ys + __fixed_width / 2) * scale
    
    # Points returned in form
    #    [[ x1   y1]
    #     [ x2   y2]
    #         ...
    #     [ xn   yn]]
    pts_cart = np.concatenate([xs, ys], axis=1)
    
    # --------------------- Debug Drawing ---------------------
    if debug_index != False:
        debug_img = eye_img.copy()
        debug_polar = cv2.cvtColor(img_polar, cv2.COLOR_GRAY2BGR)
        
        cv2.imwrite("polar.jpg",debug_polar)
        
        cv2.line(debug_polar, (0, __min_limb_r), (img_polar.shape[1], __min_limb_r), (255, 255, 0))
        cv2.line(debug_polar, (0, __max_limb_r), (img_polar.shape[1], __max_limb_r), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] // 2, debug_img.shape[0] // 2), int(debug_img.shape[0] * __limb_r_ratios[0]), (255, 255, 0))
        cv2.circle(debug_img, (debug_img.shape[1] // 2, debug_img.shape[0] // 2), int(debug_img.shape[0] * __limb_r_ratios[1]), (255, 255, 0))
        
        pts_polar = np.squeeze(np.dstack([pol_xs, mags]))
        draw_points(debug_polar, pts_polar, (0, 0, 255), width=1)
        draw_points(debug_img, pts_cart, (0, 0, 255), width=1)
    
        stacked_imgs_polar = stack_imgs_vertical([debug_polar, filter_img])
        stacked_imgs = stack_imgs_horizontal([debug_img, eye_img_grey, stacked_imgs_polar])
        
        __debug_imgs[debug_index] = stacked_imgs
        
        if debug_index == 2:
            full_debug_img = stack_imgs_vertical([__debug_imgs[1], __debug_imgs[2]])
            cv2.imshow(__winname, full_debug_img)
        elif debug_index > 2:
            cv2.imshow(__winname, stacked_imgs)
    # --------------------- Debug Drawing ---------------------

    return pts_cart
Code example #41
File: cv_image.py  Project: denex/snafucator
def normalize_rect(rect):
    pol_sq = np.array([[d[0][0] for d in cv2.cartToPolar(float(x), float(y))] for x, y in rect])
    sorted_indices = np.lexsort((pol_sq[:, 1], pol_sq[:, 0]))
    pol_sq = pol_sq[sorted_indices]
    ordered_square = np.array([[d[0][0] for d in cv2.polarToCart(m, a)] for m, a in pol_sq], dtype=np.float32)
    return ordered_square
Code example #42
File: swap.py  Project: ajdroid/DIP15
img1 = cv2.imread("../hand.JPG", 0)
img2 = cv2.imread("../calender.JPG", 0)
imgf1 = np.float32(img1)
imgf2 = np.float32(img2)

dft1 = cv2.dft(imgf1, flags=cv2.DFT_COMPLEX_OUTPUT)
dft2 = cv2.dft(imgf2, flags=cv2.DFT_COMPLEX_OUTPUT)

# dftc1 = dft1
# dftc1[:,:,0] = dftc.real
# dftc1[:,:,1] = dftc.imag
dftmag1, dftphase1 = cv2.cartToPolar(dft1[:, :, 0], dft1[:, :, 1])
dftmag2, dftphase2 = cv2.cartToPolar(dft2[:, :, 0], dft2[:, :, 1])

dft1[:, :, 0], dft1[:, :, 1] = cv2.polarToCart(dftmag1, dftphase2)
dft2[:, :, 0], dft2[:, :, 1] = cv2.polarToCart(dftmag2, dftphase1)

# print dft1.shape

# dft_shift1 = np.fft.fftshift(dft1)
# dft_shift2 = np.fft.fftshift(dft2)

# magnitude_spectrum = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# phase_spectrum = (cv2.phase(dft_shift[:,:,0],dft_shift[:,:,1]))


# dft_shift = np.fft.fftshift(dft1)
# magnitude_spectrum1 = 20*np.log(cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))
# phase_spectrum1 = (cv2.phase(dft_shift[:,:,0],dft_shift[:,:,1]))