def icp(d1, d2, max_iterate = 100):
    src = np.array([d1.T], copy=True).astype(np.float32)
    dst = np.array([d2.T], copy=True).astype(np.float32)
    
    knn = cv2.KNearest()
    responses = np.array(range(len(d2[0]))).astype(np.float32)
    knn.train(src[0], responses)
        
    Tr = np.array([[np.cos(0), -np.sin(0), 0],
                   [np.sin(0), np.cos(0),  0],
                   [0,         0,          1]])

    dst = cv2.transform(dst, Tr[0:2])
    max_dist = sys.maxint
    
    scale_x = np.max(d1[0]) - np.min(d1[0])
    scale_y = np.max(d1[1]) - np.min(d1[1])
    scale = max(scale_x, scale_y)
       
    for i in range(max_iterate):
        ret, results, neighbours, dist = knn.find_nearest(dst[0], 1)
        
        indeces = results.astype(np.int32).T     
        indeces = del_miss(indeces, dist, max_dist)  
        
        T = cv2.estimateRigidTransform(dst[0, indeces], src[0, indeces], True)

        max_dist = np.max(dist)
        dst = cv2.transform(dst, T)
        Tr = np.dot(np.vstack((T,[0,0,1])), Tr)
        
        if (is_converge(T, scale)):
            break
        
    return Tr[0:2]
Example #2
    def merge(self):
        min_row, min_col = 0, 0
        for image in self.images:
            if image.index in self.unique_indices:
                print('---finding final panograph size---')
                rows, cols = image.img.shape[:2]
                box = numpy.array([[0, 0], [cols - 1, 0], [cols - 1, rows - 1],
                                   [0, rows - 1]], dtype=numpy.float32).reshape(-1, 1, 2)
                transformed_box = cv2.transform(box, image.M)
                _min_col = min(transformed_box[:, :, 0])[0]
                _min_row = min(transformed_box[:, :, 1])[0]

                if _min_row < min_row:
                    min_row = _min_row
                if _min_col < min_col:
                    min_col = _min_col

        if min_row < 0:
            min_row = -min_row
        if min_col < 0:
            min_col = -min_col

        max_row, max_col = 0, 0
        for image in self.images:
            if image.index in self.unique_indices:
                print('---merging---')
                image.M[0, 2] += min_col
                image.M[1, 2] += min_row

                transformed_box = cv2.transform(box, image.M)
                _max_col = int(max(transformed_box[:, :, 0])[0])
                _max_row = int(max(transformed_box[:, :, 1])[0])

                if _max_row > max_row:
                    max_row = _max_row
                if _max_col > max_col:
                    max_col = _max_col

        result = numpy.zeros((max_row, max_col, 3), numpy.uint8)
        result.fill(255)
        result = cv2.cvtColor(result, cv2.COLOR_BGR2BGRA)
        for image in self.images:
            if image.index in self.unique_indices:
                transformed_img = cv2.warpAffine(image.img, image.M, (max_col, max_row), borderMode=cv2.BORDER_TRANSPARENT)
                transformed_img = cv2.cvtColor(transformed_img, cv2.COLOR_BGR2BGRA)
                numpy.copyto(result, transformed_img, where=numpy.logical_and(result == 255, transformed_img != 255))

        result = cv2.cvtColor(result, cv2.COLOR_BGRA2BGR)

        return result
Example #3
def cut_from_rotated_rect(img_data, rot_box, expand_ratio, newH, contour, gt_pt_array):
    # extend the bbox width by a percent
    if rot_box[1][0] > rot_box[1][1]:
        rb1 = (rot_box[1][0] * expand_ratio, rot_box[1][1] * 1.1)
    else:
        rb1 = (rot_box[1][0] * 1.1, rot_box[1][1] * expand_ratio)
    rot_box = (rot_box[0], rb1, rot_box[2])
    # Get the 'contour' points
    rot_box_pts = cv2.boxPoints(rot_box).reshape((4, 1, 2))
    # Get the box width and height
    rMinD = min(rot_box[1])
    rMaxD = max(rot_box[1])
    # New width; the height is constant, set above
    newW = float(newH) / rMinD * rMaxD
    # 2,1,0 so the whale is not upside down
    pt_orig = rot_box_pts[[2, 1, 0], 0, :]

    # find the coordinates of the 2nd point
    def get_dist(pt1, pt2):
        return math.sqrt(math.pow((pt1[0] - pt2[0]), 2) + math.pow((pt1[1] - pt2[1]), 2))

    d1 = get_dist(pt_orig[0], pt_orig[1])
    d2 = get_dist(pt_orig[1], pt_orig[2])
    if d1 < d2:
        mid_point = [0, newH]
    else:
        mid_point = [newW, 0]

    # create the destination coordinates
    pt_dest = np.array([[0, 0], mid_point, [newW, newH]]).astype(np.float32)
    inv_transf = cv2.getAffineTransform(pt_dest, pt_orig)
    transf = cv2.getAffineTransform(pt_orig, pt_dest)
    x1, y1 = np.meshgrid(np.arange(newW), np.arange(newH), indexing="xy")
    coord_trans = np.dstack([x1, y1])
    coord_trans2 = cv2.transform(coord_trans, inv_transf).astype(np.float32)
    transf_img = cv2.remap(img_data, coord_trans2, None, interpolation=cv2.INTER_CUBIC)
    # Transform the contour and the 2 GT points
    if contour is not None:
        transf_contour = cv2.transform(contour, transf).astype(np.int32)
    else:
        transf_contour = None

    if gt_pt_array is not None:
        transf_gt_pts = cv2.transform(gt_pt_array, transf).astype(np.int32).reshape((2, 2)).tolist()
    else:
        transf_gt_pts = None

    return transf_img, rot_box_pts, transf_contour, transf_gt_pts
Example #4
def getNormalizedLandmarks(img, predictor, d, fronter = None, win2 = None):
    shape = predictor(img, d)
    landmarks = list(map(lambda p: (p.x, p.y), shape.parts()))
    npLandmarks = np.float32(landmarks)
    if NORM_MODE == 0:
        npLandmarkIndices = np.array(landmarkIndices)            
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                MINMAX_TEMPLATE[npLandmarkIndices])
        normLM = cv2.transform(np.asarray([npLandmarks]),H)[0,:,:]
        return normLM,shape
    else:
        assert fronter is not None
        thumbnail = fronter.frontalizeImage(img,d,npLandmarks)
        #thumbnail = imgEnhance(thumbnail)
        cut = thumbnail.shape[0] // 5
        thumbnail = thumbnail[cut+5:thumbnail.shape[0]-cut-5,cut+10:thumbnail.shape[1]-cut-10,:].copy()
        newShape = predictor(thumbnail, dlib.rectangle(0,0,thumbnail.shape[0],thumbnail.shape[1]))
        if win2 is not None:
            win2.clear_overlay()
            win2.set_image(thumbnail)
            win2.add_overlay(newShape)
            #dlib.hit_enter_to_continue()
        landmarks = list(map(lambda p: (float(p.x)/thumbnail.shape[0], float(p.y)/thumbnail.shape[1]), newShape.parts()))
        npLandmarks = np.float32(landmarks)
        normLM = npLandmarks
        return normLM,shape,thumbnail
Example #5
 def transform_points(self, points, mat, size, padding=0):
     matrix = mat * (size - 2 * padding)
     matrix[:,2] += padding
     points = np.expand_dims(points, axis=1)
     points = cv2.transform(points, matrix, points.shape)
     points = np.squeeze(points)
     return points
Example #6
def getPano(M, img1, img2):
    rows, cols = img2.shape[:2]
    # This is the tricky part. For transform, the point is identified as pair
    # of (col, row) instead of (row, col)
    box = numpy.array([[0, 0], [cols - 1, 0], [cols - 1, rows - 1],
                       [0, rows - 1]], dtype=numpy.float32).reshape(-1, 1, 2)
    transformed_box = cv2.transform(box, M)
    min_col = min(transformed_box[:, :, 0])[0]
    min_row = min(transformed_box[:, :, 1])[0]

    if min_col < 0:
        transformed_box[:, :, 0] -= min_col
        M[0, 2] -= min_col

    if min_row < 0:
        transformed_box[:, :, 1] -= min_row
        M[1, 2] -= min_row

    max_col = int(max(transformed_box[:, :, 0])[0])
    max_row = int(max(transformed_box[:, :, 1])[0])

    I = numpy.array([[1, 0, 0], [0, 1, 0]], dtype=numpy.float64)
    transformed_img1 = cv2.warpAffine(img1, I, (max_col, max_row))
    transformed_img2 = cv2.warpAffine(img2, M, (max_col, max_row))
    numpy.copyto(
        transformed_img1, transformed_img2, where=transformed_img1 == 0)

    return transformed_img1
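A minimal standalone check of the (col, row) comment above: cv2.transform reads each point as (x, y) = (col, row), so a pure translation shifts the first coordinate by tx and the second by ty.

import cv2
import numpy

pt = numpy.float32([[[10, 20]]])            # one point: x (col) = 10, y (row) = 20
M = numpy.float32([[1, 0, 5], [0, 1, 7]])   # translate by (tx=5, ty=7)
print(cv2.transform(pt, M))                 # [[[15. 27.]]]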
Example #7
    def coarse_alignment(t_inv):
        t_ide = np.identity(3, 'float32')[: 2]

        coarse_src_coords = cv2.transform( \
                coarse_trg_coords[:, None], t_inv)[:, 0]

        # Transform target for coarse grid search
        shape = tuple(int(s / d_shift) for s in src_mf.shape)
        trg_mf_t = cv2.warpAffine(trg_mf, t_inv / d_shift, shape[::-1])
        src_mf_t = cv2.warpAffine(src_mf, t_ide / d_shift, shape[::-1])

        # Coarse grid search
        t_corr_list = [libreg.affine_registration.match_template_brute( \
                 get_patch_at(trg_mf_t, \
                     src_coord / d_shift, patch_size / d_shift), \
                 scipy.fftpack.fft2(get_patch_at(src_mf_t, \
                     src_coord / d_shift, shift_space / d_shift)), \
                 rotation=slice(0, 1, 1) if angle_space is None \
                     else slice(-angle_space, +angle_space, d_angle), \
                 logscale_x=slice(0, 1, 1) if scale_space is None \
                     else slice(-scale_space, +scale_space, d_scale), \
                 logscale_y=slice(0, 1, 1) if scale_space is None \
                     else slice(-scale_space, +scale_space, d_scale), \
                 find_translation=libreg.affine_registration \
                     .cross_correlation_fft) \
                for src_coord in coarse_src_coords]
        dx = np.array([np.dot(t[:, :2], \
                (patch_size / d_shift, patch_size / d_shift)) \
              + t[:, 2] - (shift_space / d_shift, shift_space / d_shift) \
              for t, _ in t_corr_list])
        coarse_src_coords += dx * d_shift

        corr = np.array([corr for _, corr in t_corr_list], 'float32')

        return coarse_src_coords, corr
Example #8
def get_align_mat(face, size, should_align_eyes):
    mat_umeyama = umeyama(numpy.array(face.landmarks_as_xy()[17:]), landmarks_2D, True)[0:2]

    if should_align_eyes is False:
        return mat_umeyama

    mat_umeyama = mat_umeyama * size

    # Convert to matrix
    landmarks = numpy.matrix(face.landmarks_as_xy())

    # cv2 expects points to be in the form np.array([ [[x1, y1]], [[x2, y2]], ... ]), we'll expand the dim
    landmarks = numpy.expand_dims(landmarks, axis=1)

    # Align the landmarks using umeyama
    umeyama_landmarks = cv2.transform(landmarks, mat_umeyama, landmarks.shape)

    # Determine a rotation matrix to align eyes horizontally
    mat_align_eyes = align_eyes(umeyama_landmarks, size)

    # Extend the 2x3 transform matrices to 3x3 so we can multiply them
    # and combine them as one
    mat_umeyama = numpy.matrix(mat_umeyama)
    mat_umeyama.resize((3, 3))
    mat_align_eyes = numpy.matrix(mat_align_eyes)
    mat_align_eyes.resize((3, 3))
    mat_umeyama[2] = mat_align_eyes[2] = [0, 0, 1]

    # Combine the umeyama transform with the extra rotation matrix
    transform_mat = mat_align_eyes * mat_umeyama

    # Remove the extra row added, shape needs to be 2x3
    transform_mat = numpy.delete(transform_mat, 2, 0)
    transform_mat = transform_mat / size
    return transform_mat
Example #9
 def convert_to_sepia(self, frame):
     """Sepia filtras"""
     m_sepia = numpy.asarray([[0.393, 0.769, 0.189],
                          [0.349, 0.686, 0.168],
                          [0.272, 0.534, 0.131]])
     sepia = cv2.transform(frame, m_sepia)
     sepia = cv2.cvtColor(sepia, cv2.COLOR_RGB2BGR)
     return sepia
Example #10
def icp(a, b, init_pose=(0, 0, 0), no_iterations=13):
    '''
    The Iterative Closest Point estimator.
    Takes two cloudpoints a[x,y], b[x,y], an initial estimation of
    their relative pose and the number of iterations
    Returns the affine transform that transforms
    the cloudpoint a to the cloudpoint b.
    Note:
        (1) This method works for cloudpoints with minor
        transformations. Thus, the result depends greatly on
        the initial pose estimation.
        (2) A large number of iterations does not necessarily
        ensure convergence. Contrarily, most of the time it
        produces worse results.
    '''

    src = np.array([a.T], copy=True).astype(np.float32)
    dst = np.array([b.T], copy=True).astype(np.float32)

    # Initialise with the initial pose estimation
    Tr = np.array([[np.cos(init_pose[2]), -np.sin(init_pose[2]), init_pose[0]],
                   [np.sin(init_pose[2]), np.cos(init_pose[2]), init_pose[1]],
                   [0, 0, 1]])

    src = cv2.transform(src, Tr[0:2])

    for i in range(no_iterations):
        # Find the nearest neighbours between the current source and the
        # destination cloudpoint
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto',
                                warn_on_equidistant=False).fit(dst[0])
        distances, indices = nbrs.kneighbors(src[0])

        # Compute the transformation between the current source
        # and destination cloudpoint
        T = cv2.estimateRigidTransform(src, dst[0, indices.T], False)

        # Transform the previous source and update the
        # current source cloudpoint
        src = cv2.transform(src, T)

        # Save the transformation from the actual source cloudpoint
        # to the destination
        Tr = np.dot(Tr, np.vstack((T, [0, 0, 1])))

    return Tr[0:2]
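A usage sketch for the function above, with made-up data; it assumes the legacy APIs it relies on (cv2.estimateRigidTransform and scikit-learn's old warn_on_equidistant keyword) are available in the environment.

import numpy as np

theta = np.deg2rad(10)
a = (np.random.rand(2, 200) * 100).astype(np.float32)   # 2xN source cloud
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]], np.float32)
b = R.dot(a)                                            # rotated copy as the target
Tr = icp(a, b, init_pose=(0, 0, 0), no_iterations=13)
print(Tr)                                               # 2x3 affine, approximately the applied rotation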
Example #11
    def frame_edit(self, frame):
        """
        Applies sepia filter to the image.

        :param frame: image to be changed.
        :return: image with sepia filter.
        """
        sepia = cv2.transform(frame, self.kernel)
        return cv2.cvtColor(sepia, cv2.COLOR_RGB2BGR)
Example #12
 def transform_points(self, points, mat, size, padding=0):
     """ Transform points along matrix """
     logger.trace("points: %s, matrix: %s, size: %s. padding: %s", points, mat, size, padding)
     matrix = self.transform_matrix(mat, size, padding)
     points = np.expand_dims(points, axis=1)
     points = cv2.transform(  # pylint: disable=no-member
         points, matrix, points.shape)
     retval = np.squeeze(points)
     logger.trace("Returning: %s", retval)
     return retval
Example #13
 def transform(self, transformMat):
     if transformMat is None:
         return []
     else:
         pts = [self.leftTop(), self.rightTop(), self.rightBottom(), self.leftBottom()]
         pts = np.float32(pts).reshape(-1,1,2)
         transPts = cv2.transform(pts, transformMat)
         left, right = min(transPts[:,:,0]), max(transPts[:,:,0])
         top, bottom = min(transPts[:,:,1]), max(transPts[:,:,1])
         return Bbox(left, top, right, bottom)
Example #14
def sepia_filter(cv_pic):
    #first line apply on output blue (rgb input)
    #second line apply on output green
    #third line apply on output red
    m_sepia = numpy.asarray([[0.131, 0.534, 0.272],
                             [0.168, 0.686, 0.349],
                             [0.189, 0.769, 0.393]])
    
    sepia = cv2.transform(cv_pic, m_sepia)
    return sepia
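A short usage sketch for the BGR-ordered kernel above (file names are hypothetical): cv2.imread returns BGR, which is exactly the channel order this kernel expects, and cv2.transform saturates the uint8 result, so no explicit clipping is needed.

import cv2

img = cv2.imread('input.jpg')               # hypothetical input, BGR uint8
cv2.imwrite('sepia.jpg', sepia_filter(img))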
Example #15
def sepia(img):
    kernel = np.array(
        ([0.272, 0.534, 0.131],
         [0.349, 0.686, 0.168],
         [0.393, 0.769, 0.189])
    )

    result = cv2.transform(img, kernel)

    return result
Example #16
def warpImages(img1, img2, H):
    rows1, cols1 = img1.shape[:2]
    rows2, cols2 = img2.shape[:2]

    list_of_points_1 = np.float32([[0,0], [0,rows1], [cols1,rows1], [cols1,0]])
    temp_points = np.float32([[0,0], [0,rows2], [cols2,rows2], [cols2,0]])
    list_of_points_1 = np.array([list_of_points_1])
    temp_points = np.array([temp_points])
    #H =  np.array([[H[0][0], H[0][1], H[0][2]], [H[1][0], H[1][1], H[1][2]]])
    #list_of_points_2 = cv2.perspectiveTransform(temp_points, H)
    list_of_points_2 = cv2.transform(temp_points, H)
    list_of_points = np.concatenate((list_of_points_1, list_of_points_2), axis=0)


    [x_min, y_min] = np.int32(np.min(np.min(list_of_points, axis=1), axis=0))
    [x_max, y_max] = np.int32(np.max(np.max(list_of_points, axis=1), axis=0))

    print(list_of_points)
    print(x_min, y_min)

    translation_dist = [-x_min,-y_min]
    H_translation = np.array([[1, 0,translation_dist[0]], [0, 1, translation_dist[1]], [0,0,1]])

    #float d = H[0][0], H[0][1], H[0][2];

    #H_affine = np.array([[H[0][0], H[0][1], H[0][2]], [H[1][0], H[1][1], H[1][2]]])
    #H_translation_affine = np.array([[H_translation[0][0], H_translation[0][1], H_translation[0][2]], [H_translation[1][0], H_translation[1][1], H_translation[1][2]]])

    H_translation_affine = np.float32([[1,0,0],[0,1,0]])
    #H_affine = np.array([[H[0][0], H[0][1], H[0][2] + H_translation[0][2]], [H[1][0], H[1][1], H[1][2]+ H_translation[1][2]]])
    H_affine = np.array([[H[0][0], H[0][1], H[0][2]], [H[1][0], H[1][1], H[1][2]]])
    #output_img = cv2.warpPerspective(img2, H_translation.dot(H), (x_max-x_min, y_max-y_min))

    #img1_large = cv2.warpPerspective(img1, H_translation, (x_max-x_min, y_max-y_min))
    #output_img = cv2.warpPerspective(img2, H_translation.dot(H), (y_max-y_min, x_max-x_min))
    #img1_large = cv2.warpPerspective(img1, H_translation, (y_max-y_min, x_max-x_min))

    output_img = cv2.warpAffine(img2, H_affine, (y_max-y_min, x_max-x_min))
    img1_large = cv2.warpAffine(img1, H_translation_affine, (y_max-y_min, x_max-x_min))

    print(output_img.shape[:2])

    base_image = np.zeros((x_max-x_min, y_max-y_min, 3), np.uint8)

    print(base_image.shape[:2])

    (ret,data_map) = cv2.threshold(cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY),0, 255, cv2.THRESH_BINARY)

    base_image = cv2.add(base_image, img1_large, mask=np.bitwise_not(data_map), dtype=cv2.CV_8U)

    final_img = cv2.add(base_image, output_img, dtype=cv2.CV_8U)

    return final_img
Example #17
    def warp_im(self, im, M, dshape):
        output_im = np.ones(dshape, dtype=im.dtype)*255
        cv2.warpAffine(im,
                       M[:2],
                       (dshape[1], dshape[0]),
                       dst=output_im,
                       borderMode=cv2.BORDER_TRANSPARENT,
                       flags=cv2.WARP_INVERSE_MAP)

        return output_im
Example #18
def applyTransform(points, T):
    """
    Apply transform to a list of points
    points: list of points
    T: rigid transformation matrix (shape 2x3)
    """
    dataA = np.array(points)
    src = np.array([dataA])
    data_dest = cv2.transform(src, T)
    a, b, c = data_dest.shape
    data_dest = np.reshape(data_dest, (b, c))
    return data_dest
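Example usage sketch for applyTransform: rotate three points 90 degrees about the origin with a standard OpenCV rotation matrix (the values are arbitrary).

import cv2

T = cv2.getRotationMatrix2D((0.0, 0.0), 90, 1.0)   # 2x3 affine
pts = [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]
print(applyTransform(pts, T))                       # 3x2 array of rotated points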
Example #19
def process_coord(coords):
    w = h = 600
    #eyecornerDst=[80,,(np.int(0.7*w),np.int(h/3))]
    eyecornerDst = [(80, 112), (175, 110)]
    eyecornerSrc = [coords[36, :], coords[45, :]]
    tform = face.similarityTransform(eyecornerSrc, eyecornerDst)
    points2 = np.reshape(coords, (68, 1, 2))

    points = cv2.transform(points2, tform)

    points = np.float32(np.reshape(points, (68, 2)))
    return points
Example #20
def crop_minAreaRect(img, rect):
  angle = rect[2]
  rows,cols = img.shape[0], img.shape[1]
  matrix = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)
  img_rot = cv2.warpAffine(img,matrix,(cols,rows))

  rect0 = (rect[0], rect[1], 0.0)
  box = cv2.boxPoints(rect)
  pts = np.int0(cv2.transform(np.array([box]), matrix))[0]
  pts[pts < 0] = 0

  return img_rot[pts[1][1]:pts[0][1], pts[1][0]:pts[2][0]]
Example #21
    def _preprocess(self, img):
        # Convert image to greyscale
        gs_luminosities = [0.07, 0.72, 0.21]
        grey = cv.transform(img, np.array(gs_luminosities).reshape((1, 3)))

        # Blur image
        blur = cv.blur(grey, (self.blur, self.blur))

        # Get the edges
        canny_kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])
        edges = cv.filter2D(blur, -1, canny_kernel)
        return edges
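The 1x3 kernel above is just a per-pixel weighted sum over the B, G and R channels; a minimal standalone check with a made-up constant image (assuming the import cv2 as cv / import numpy as np aliases used in this snippet).

img = np.full((2, 2, 3), (10, 100, 200), dtype=np.uint8)    # constant BGR image
weights = np.array([0.07, 0.72, 0.21]).reshape((1, 3))
print(cv.transform(img, weights))   # every pixel -> round(0.07*10 + 0.72*100 + 0.21*200) = 115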
Example #22
    def get_feature_mask(aligned_landmarks_68, size,
                         padding=0, dilation=30):
        """ Return the face feature mask """
        # pylint: disable=no-member
        logger.trace("aligned_landmarks_68: %s, size: %s, padding: %s, dilation: %s",
                     aligned_landmarks_68, size, padding, dilation)
        scale = size - 2 * padding
        translation = padding
        pad_mat = np.matrix([[scale, 0.0, translation],
                             [0.0, scale, translation]])
        aligned_landmarks_68 = np.expand_dims(aligned_landmarks_68, axis=1)
        aligned_landmarks_68 = cv2.transform(aligned_landmarks_68,
                                             pad_mat,
                                             aligned_landmarks_68.shape)
        aligned_landmarks_68 = np.squeeze(aligned_landmarks_68)

        (l_start, l_end) = FACIAL_LANDMARKS_IDXS["left_eye"]
        (r_start, r_end) = FACIAL_LANDMARKS_IDXS["right_eye"]
        (m_start, m_end) = FACIAL_LANDMARKS_IDXS["mouth"]
        (n_start, n_end) = FACIAL_LANDMARKS_IDXS["nose"]
        (lb_start, lb_end) = FACIAL_LANDMARKS_IDXS["left_eyebrow"]
        (rb_start, rb_end) = FACIAL_LANDMARKS_IDXS["right_eyebrow"]
        (c_start, c_end) = FACIAL_LANDMARKS_IDXS["chin"]

        l_eye_points = aligned_landmarks_68[l_start:l_end].tolist()
        l_brow_points = aligned_landmarks_68[lb_start:lb_end].tolist()
        r_eye_points = aligned_landmarks_68[r_start:r_end].tolist()
        r_brow_points = aligned_landmarks_68[rb_start:rb_end].tolist()
        nose_points = aligned_landmarks_68[n_start:n_end].tolist()
        chin_points = aligned_landmarks_68[c_start:c_end].tolist()
        mouth_points = aligned_landmarks_68[m_start:m_end].tolist()
        l_eye_points = l_eye_points + l_brow_points
        r_eye_points = r_eye_points + r_brow_points
        mouth_points = mouth_points + nose_points + chin_points

        l_eye_hull = cv2.convexHull(np.array(l_eye_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))
        r_eye_hull = cv2.convexHull(np.array(r_eye_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))
        mouth_hull = cv2.convexHull(np.array(mouth_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))

        mask = np.zeros((size, size, 3), dtype=float)
        cv2.fillConvexPoly(mask, l_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, r_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, mouth_hull, (1, 1, 1))

        if dilation > 0:
            kernel = np.ones((dilation, dilation), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=1)

        logger.trace("Returning: %s", mask)
        return mask
Example #23
 def get_original_roi(self, mat, size, padding=0):
     """ Return the square aligned box location on the original
         image """
     logger.trace("matrix: %s, size: %s. padding: %s", mat, size, padding)
     matrix = self.transform_matrix(mat, size, padding)
     points = np.array(
         [[0, 0], [0, size - 1], [size - 1, size - 1], [size - 1, 0]],
         np.int32)
     points = points.reshape((-1, 1, 2))
     matrix = cv2.invertAffineTransform(matrix)  # pylint: disable=no-member
     logger.trace("Returning: (points: %s, matrix: %s", points, matrix)
     return cv2.transform(points, matrix)  # pylint: disable=no-member
Example #24
    def get_feature_mask(aligned_landmarks_68, size,
                         padding=0, dilation=30):
        """ Return the face feature mask """
        # pylint: disable=no-member
        logger.trace("aligned_landmarks_68: %s, size: %s, padding: %s, dilation: %s",
                     aligned_landmarks_68, size, padding, dilation)
        scale = size - 2 * padding
        translation = padding
        pad_mat = np.matrix([[scale, 0.0, translation],
                             [0.0, scale, translation]])
        aligned_landmarks_68 = np.expand_dims(aligned_landmarks_68, axis=1)
        aligned_landmarks_68 = cv2.transform(aligned_landmarks_68,
                                             pad_mat,
                                             aligned_landmarks_68.shape)
        aligned_landmarks_68 = np.squeeze(aligned_landmarks_68)

        (l_start, l_end) = FACIAL_LANDMARKS_IDXS["left_eye"]
        (r_start, r_end) = FACIAL_LANDMARKS_IDXS["right_eye"]
        (m_start, m_end) = FACIAL_LANDMARKS_IDXS["mouth"]
        (n_start, n_end) = FACIAL_LANDMARKS_IDXS["nose"]
        (lb_start, lb_end) = FACIAL_LANDMARKS_IDXS["left_eyebrow"]
        (rb_start, rb_end) = FACIAL_LANDMARKS_IDXS["right_eyebrow"]
        (c_start, c_end) = FACIAL_LANDMARKS_IDXS["chin"]

        l_eye_points = aligned_landmarks_68[l_start:l_end].tolist()
        l_brow_points = aligned_landmarks_68[lb_start:lb_end].tolist()
        r_eye_points = aligned_landmarks_68[r_start:r_end].tolist()
        r_brow_points = aligned_landmarks_68[rb_start:rb_end].tolist()
        nose_points = aligned_landmarks_68[n_start:n_end].tolist()
        chin_points = aligned_landmarks_68[c_start:c_end].tolist()
        mouth_points = aligned_landmarks_68[m_start:m_end].tolist()
        l_eye_points = l_eye_points + l_brow_points
        r_eye_points = r_eye_points + r_brow_points
        mouth_points = mouth_points + nose_points + chin_points

        l_eye_hull = cv2.convexHull(np.array(l_eye_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))
        r_eye_hull = cv2.convexHull(np.array(r_eye_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))
        mouth_hull = cv2.convexHull(np.array(mouth_points).reshape(
            (-1, 2)).astype(int)).flatten().reshape((-1, 2))

        mask = np.zeros((size, size, 3), dtype=float)
        cv2.fillConvexPoly(mask, l_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, r_eye_hull, (1, 1, 1))
        cv2.fillConvexPoly(mask, mouth_hull, (1, 1, 1))

        if dilation > 0:
            kernel = np.ones((dilation, dilation), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=1)

        logger.trace("Returning: %s", mask)
        return mask
Example #25
def icp(ref_cloud, new_cloud, init_pose=(0, 0, 0), no_iterations=13):
    '''
        The Iterative Closest Point estimator.
        Takes two cloudpoints a[x,y], b[x,y], an initial estimation of
        their relative pose and the number of iterations
        Returns the affine transform that transforms
        the cloudpoint a to the cloudpoint b.
        Note:
        '''
    ref_cloud = np.array([ref_cloud.T], copy=True).astype(np.float32)
    new_cloud = np.array([new_cloud.T], copy=True).astype(np.float32)

    #Initialise with the initial pose estimation
    Transf_mat = np.array(
        [[np.cos(init_pose[2]), -np.sin(init_pose[2]), init_pose[0]],
         [np.sin(init_pose[2]),
          np.cos(init_pose[2]), init_pose[1]], [0, 0, 1]])

    ref_cloud_t = cv2.transform(ref_cloud, Transf_mat[0:2])

    for i in range(no_iterations):
        #Find the nearest neighbours between the current source and the
        #destination cloudpoint
        nbrs = NearestNeighbors(n_neighbors=1,
                                algorithm='auto').fit(new_cloud[0])
        distances, indices = nbrs.kneighbors(ref_cloud_t[0])

        transformation = cv2.estimateAffinePartial2D(ref_cloud_t,
                                                     new_cloud[0, indices.T])

        #Transform the previous source and update the
        #current source cloudpoint
        ref_cloud_t = cv2.transform(ref_cloud_t, transformation[0])

        #Save the transformation from the actual source cloudpoint
        #to the destination
        Transf_mat = np.dot(Transf_mat,
                            np.vstack((transformation[0], [0, 0, 1])))

    return Transf_mat[0:2]
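A brief usage sketch for this variant with made-up data; unlike the earlier ICP examples it relies on cv2.estimateAffinePartial2D, which is available in OpenCV 3.2 and later.

import numpy as np

ref = (np.random.rand(2, 200) * 50).astype(np.float32)
new = ref + np.float32([[3.0], [1.5]])        # target cloud: translated copy of ref
print(icp(ref, new, init_pose=(0, 0, 0), no_iterations=13))   # ~2x3 affine close to that translation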
Example #26
    def align(self, image, shape):
        if len(shape) == 68:
            # extract the left and right eye (x, y)-coordinates
            (lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
            (rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]

        leftEyePts = shape[lStart:lEnd]
        rightEyePts = shape[rStart:rEnd]

        # compute the center of mass for each eye
        leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
        rightEyeCenter = rightEyePts.mean(axis=0).astype("int")

        # compute the angle between the eye centroids
        dY = rightEyeCenter[1] - leftEyeCenter[1]
        dX = rightEyeCenter[0] - leftEyeCenter[0]
        angle = np.degrees(np.arctan2(dY, dX)) - 180

        # compute the desired right eye x-coordinate based on the
        # desired x-coordinate of the left eye
        desiredRightEyeX = 1.0 - self.desiredLeftEye[0]

        # determine the scale of the new resulting image by taking
        # the ratio of the distance between eyes in the *current*
        # image to the ratio of distance between eyes in the
        # *desired* image
        dist = np.sqrt((dX ** 2) + (dY ** 2))
        desiredDist = (desiredRightEyeX - self.desiredLeftEye[0])
        desiredDist *= self.desiredFaceWidth
        scale = desiredDist / dist

        # compute center (x, y)-coordinates (i.e., the median point)
        # between the two eyes in the input image
        eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) // 2,
                      (leftEyeCenter[1] + rightEyeCenter[1]) // 2)

        # grab the rotation matrix for rotating and scaling the face
        M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)

        # update the translation component of the matrix
        tX = self.desiredFaceWidth * 0.5
        tY = self.desiredFaceHeight * self.desiredLeftEye[1]
        M[0, 2] += (tX - eyesCenter[0])
        M[1, 2] += (tY - eyesCenter[1])

        # apply the affine transformation
        (w, h) = (self.desiredFaceWidth, self.desiredFaceHeight)
        output = cv2.warpAffine(image, M, (w, h),
                                flags=cv2.INTER_CUBIC)
        output_lnd = cv2.transform(shape[np.newaxis], M)
        # return the aligned face and lnd
        return output, np.squeeze(output_lnd)
Example #27
def rotate_rotula(img):
    """
    Rotates the image so that the transversal points of the Rotula bone are parallel to the margins of the image

    :param img: image to be rotated
    """

    contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
    femur = sorted_contours[0]
    extTop = tuple(femur[femur[:, :, 1].argmin()][0])
    for i in range(img.shape[0] - 1):
        for j in range(img.shape[1] - 1):
            if extTop[1] < i:
                img[i][j] = 0  #remove anything
    rotula_contour = sorted_contours[1]
    rect = cv2.minAreaRect(rotula_contour)
    angle = rect[2]

    if angle < -45:
        angle = (90 + angle)
    # otherwise, just take the inverse of the angle to make it positive
    else:
        angle = -angle

    # rotate the image to deskew it
    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(img,
                             M, (w, h),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    contours, _ = cv2.findContours(rotated, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
    sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
    c = sorted_contours[0]
    extLeft = tuple(c[c[:, :, 0].argmin()][0])
    extRight = tuple(c[c[:, :, 0].argmax()][0])
    transform_points = np.array([[[extLeft[0], extLeft[1]]],
                                 [[extRight[0], extRight[1]]]])
    M = cv2.getRotationMatrix2D(center, -angle, 1.0)
    rotated = cv2.warpAffine(img,
                             M, (w, h),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    tf2 = cv2.transform(transform_points, M)

    return rotated, (tf2[0][0], tf2[1][0])
Example #28
def cropMinRect(img, rect):

    #===expand img
    angle = rect[2]
    rows, cols = img.shape[0], img.shape[1]
    # size fit
    theta = np.deg2rad(abs(angle))
    phi = np.pi / 2 - theta
    cols_new = toInt(cols * np.sin(theta) + rows * np.sin(phi))
    rows_new = toInt(cols * np.cos(theta) + rows * np.cos(phi))
    cols_add = toInt((cols_new - cols) / 2)  #in the border
    rows_add = toInt((rows_new - rows) / 2)
    # create a border so the rotation does not crop the image
    img = cv2.copyMakeBorder(img,
                             rows_add,
                             rows_add,
                             cols_add,
                             cols_add,
                             cv2.BORDER_CONSTANT,
                             value=(255, 255, 255))

    # rotate img
    M = cv2.getRotationMatrix2D((toInt(cols_new / 2), toInt(rows_new / 2)),
                                angle, 1.0)
    img_rot = cv2.warpAffine(
        img, M, (cols_new, rows_new))  # <<== SLOW FUNCTION
    #show(img_rot)

    #===rotate bounding box
    rect0 = (rect[0], rect[1], 0.0)
    if cv2.__version__[0] >= '3':  # boxPoints exists in OpenCV 3 and later
        box = cv2.boxPoints(rect)
    else:  #version 2.x.x
        box = cv2.cv.BoxPoints(rect)
        box = np.array(box)

    for pt in box:  #resize box
        pt[0] += cols_add
        pt[1] += rows_add

    pts = np.int0(cv2.transform(np.array([box]), M))[0]
    pts[pts < 0] = 0

    # crop
    img_crop = img_rot[pts[1][1]:pts[0][1], pts[1][0]:pts[2][0]]

    # make the longest side the base, if it is not already
    if img_crop.shape[0] > img_crop.shape[1]:
        #for color in img_crop:
        img_crop = cv2.transpose(img_crop)

    return img_crop
Example #29
def getSubImage(rect, src):
    # Get center, size, and angle from rect
    center, size, theta = rect
    print(theta)
    if theta < -45:
        theta = (90 + theta)
    # else:
    #     theta = -theta
    center = tuple(map(int, center))
    M = cv2.getRotationMatrix2D(center, theta, 1)
    out = cv2.transform(src, M)
    print("out", len(out))
    return out
Example #30
def map_points(pts, image_size, warpMatrix, distortion_coeffs, camera_matrix,warp_mode=cv2.MOTION_HOMOGRAPHY):
    #assert len(affine) == 6, "affine must have len == 6, has len {}".format(len(affine))

    # extra dimension makes opencv happy
    pts = np.array([pts], dtype=np.float64)

    new_cam_mat, _ = cv2.getOptimalNewCameraMatrix(camera_matrix, distortion_coeffs, image_size, 1)
    new_pts = cv2.undistortPoints(pts, camera_matrix, distortion_coeffs, P=new_cam_mat)
    if warp_mode == cv2.MOTION_AFFINE:
        new_pts = cv2.transform(new_pts, cv2.invertAffineTransform(warpMatrix))
    if warp_mode == cv2.MOTION_HOMOGRAPHY:
        new_pts =cv2.perspectiveTransform(new_pts,np.linalg.inv(warpMatrix).astype(np.float32))
    return new_pts[0]
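A hypothetical usage sketch: with zero distortion and an identity homography the points should come back essentially unchanged (the camera parameters below are made up).

import numpy as np

K = np.array([[800.0, 0.0, 320.0], [0.0, 800.0, 240.0], [0.0, 0.0, 1.0]])
dist = np.zeros(5)
H = np.eye(3)
print(map_points([(100.0, 100.0), (320.0, 240.0)], (640, 480), H, dist, K))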
Example #31
 def transform_points(self, points, mat, size, padding=0):
     """ Transform points along matrix """
     logger.trace("points: %s, matrix: %s, size: %s. padding: %s", points,
                  mat, size, padding)
     matrix = self.transform_matrix(mat, size, padding)
     points = np.expand_dims(points, axis=1)
     points = cv2.transform(
         points,  # pylint: disable=no-member
         matrix,
         points.shape)
     retval = np.squeeze(points)
     logger.trace("Returning: %s", retval)
     return retval
Example #32
    def rotate_points(self, pts, angle=None, shape=None):
        """Rotate points in relation to image."""
        log.debug('Start rotate points.')
        if shape is None:
            shape = self.shape
        if angle is None:
            angle = self.angle

        not_cached = self.affine_mat is None or self.size_new is None
        changed_val = self.shape != shape or self.angle != angle
        if not_cached or changed_val:
            self.__get_affine_mat(shape, angle)
        return cv2.transform(pts, self.affine_mat[0:2])
Example #33
 def getbbox(self):
     mat = self._mat
     if mat.dtype == bool:
         mat = mat.astype(np.uint8)
     _, thim = cv2.threshold(mat, 0, 255, cv2.THRESH_BINARY)
     ch = _channels(thim.shape)
     if ch > 1:
         thim = cv2.transform(thim, np.ones(ch, dtype=np.float32).reshape(1, ch))
     x, y, w, h = cv2.boundingRect(thim)
     if w == 0 and h == 0:
         return None
     rect = (x, y, x+w, y+h)
     return rect
Example #34
def keypoints_shift_scale_rotate(keypoints, angle, scale, dx, dy, rows, cols, **params):
    target_keypoints = keypoints[:, :2]
    meta_inf = keypoints[:, 2:]

    height, width = rows, cols
    center = (width / 2, height / 2)
    matrix = cv2.getRotationMatrix2D(center, angle, scale)
    matrix[0, 2] += dx * width
    matrix[1, 2] += dy * height

    new_keypoints = cv2.transform(target_keypoints[None], matrix).squeeze()

    return np.hstack([new_keypoints, meta_inf])
Example #35
 def create_rotation_matrix(self, offset=0):
     center = (self.center[0] + offset, self.center[1] + offset)
     rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
     if self.expand:
         # Find the coordinates of the center of rotation in the new image
         # The only point for which we know the future coordinates is the center of the image
         rot_im_center = cv2.transform(
             self.image_center[None, None, :] + offset, rm)[0, 0, :]
         new_center = np.array([self.bound_w / 2, self.bound_h / 2
                                ]) + offset - rot_im_center
         # shift the rotation center to the new coordinates
         rm[:, 2] += new_center
     return rm
Example #36
def crop_minAreaRect(img, rect):

    # rotate img
    angle = rect[2]
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)

    # rotate bounding box
    rect0 = (rect[0], rect[1], 0.0)
    box = cv2.boxPoints(rect0)
    pts = np.int0(cv2.transform(np.array([box]), M))[0]
    pts[pts < 0] = 0

    # rotate the image and crop the now axis-aligned box
    img_rot = cv2.warpAffine(img, M, (cols, rows))
    img_crop = img_rot[pts[1][1]:pts[0][1], pts[1][0]:pts[2][0]]
    return img_crop
Example #37
def blackAndWhite(img):
    res = img.copy()
    res = cv2.cvtColor(
        res, cv2.COLOR_BGR2RGB)  # converting to RGB as sepia matrix is for RGB
    res = np.array(res, dtype=np.float64)
    res = cv2.transform(
        res,
        np.matrix([[0.21, 0.72, 0.07], [0.21, 0.72, 0.07], [0.21, 0.72,
                                                            0.07]]))
    res[np.where(res > 255)] = 255  # clipping values greater than 255 to 255
    res = np.array(res, dtype=np.uint8)
    res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
    return res
Example #38
 def get_original_roi(self, mat, size, padding=0):
     """ Return the square aligned box location on the original
         image """
     logger.trace("matrix: %s, size: %s. padding: %s", mat, size, padding)
     matrix = self.transform_matrix(mat, size, padding)
     points = np.array([[0, 0],
                        [0, size - 1],
                        [size - 1, size - 1],
                        [size - 1, 0]], np.int32)
     points = points.reshape((-1, 1, 2))
     matrix = cv2.invertAffineTransform(matrix)  # pylint: disable=no-member
     logger.trace("Returning: (points: %s, matrix: %s", points, matrix)
     return cv2.transform(points, matrix)  # pylint: disable=no-member
Example #39
 async def process(self):
     if not self.source.value:
         return
     image = self.source.value.image.copy()
     edges = cv2.Canny(image, 100, 200)
     image[edges > 0] = np.array([200, 200, 200])
     red_image = cv2.transform(image, self.TERMINATOR_MAT)
     if self.faces.value is not None:
         for (x, y, w, h) in self.faces.value:
             cv2.rectangle(red_image, (x, y), (x + w, y + h), (255, 255, 255), 2)
     frame = Frame(red_image, 'Terminator')
     self.output.push(frame)
     self.debug_output.push(frame)
Example #40
def rotate_coords(coords, rotation, center):
    """Rotate coords around given center point
    :param coords: points to rotate
    :param rotation: rotation angle
    :param center: center of rotation
    """
    M = cv2.getRotationMatrix2D((center), rotation, 1)
    change_coords = [[item[1], item[0]] for item in coords]
    coords = np.array([change_coords])
    rotated_coords = cv2.transform(coords, M)[0]
    out_coords = [[item[1], item[0]] for item in rotated_coords]

    return np.asarray(out_coords)
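A quick usage sketch for rotate_coords (arbitrary values): the helper swaps each (row, col) pair into the (x, y) order cv2.transform expects and swaps the result back.

import numpy as np

pts = np.array([[0.0, 10.0], [5.0, 5.0]])    # (row, col) points
print(rotate_coords(pts, 90, (0, 0)))        # rotated (row, col) points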
Example #41
def sepia(img):
    res = img.copy()
    res = cv2.cvtColor(
        res, cv2.COLOR_BGR2RGB)  # converting to RGB as sepia matrix is for RGB
    res = np.array(res, dtype=np.float64)
    res = cv2.transform(
        res,
        np.matrix([[0.393, 0.769, 0.189], [0.349, 0.686, 0.168],
                   [0.272, 0.534, 0.131]]))
    res[np.where(res > 255)] = 255  # clipping values greater than 255 to 255
    res = np.array(res, dtype=np.uint8)
    res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
    return res
Example #42
    def _transform_boundaries(
            boundaries: List[np.ndarray],
            transformationMatrix: np.ndarray) -> List[np.ndarray]:

        transformedList = []
        for b in boundaries:
            reshapedBoundaries = np.reshape(b, (1, b.shape[0], 2)).astype(
                np.float64)
            transformedBoundaries = cv2.transform(
                reshapedBoundaries, transformationMatrix)[0, :, :2]
            transformedList.append(transformedBoundaries)

        return transformedList
Example #43
    def __call__(self, sample):
        image, key_pts = sample['image'], sample['keypoints']

        c_x, c_y = int(image.shape[0]/2), int(image.shape[1]/2)
        ang = self.max_ang*np.random.rand()-self.max_ang
        M = cv2.getRotationMatrix2D((c_x, c_y), ang, 1.0)
        image = cv2.warpAffine(image, M, image.shape[:2])
        
        key_pts = np.reshape(np.array(key_pts), (68, 1, 2))
        key_pts = cv2.transform(key_pts, M)
        key_pts = np.float32(np.reshape(key_pts, (68, 2)))

        return {'image': image, 'keypoints': key_pts}
Example #44
def col_deconvol_blur_clone(img, deconv_mat, size_blur):
    # deconvolution
    OD_data = transform_OD(img)
    deconv_mat = deconv_mat.astype('float32', copy=False)
    img_deconv = cv2.transform(OD_data, deconv_mat)
    ## blur
    nucl_blur = cv2.GaussianBlur(img_deconv[:, :, 0], size_blur, 0)
    clone_blur = cv2.GaussianBlur(img_deconv[:, :, 1], size_blur, 0)
    ## convert to 8 bits
    clone_blur = np.clip(clone_blur, 0, 1, out=clone_blur)
    clone_blur *= 255
    clone_blur = clone_blur.astype('uint8', copy=False)
    return clone_blur
Example #45
def align_crop(img,
               src_landmarks,
               mean_landmarks,
               crop_size=256,
               face_factor=0.7,
               landmark_factor=0.35):

    # move
    move = np.array([img.shape[1] // 2, img.shape[0] // 2])

    # pad border
    v_border = img.shape[0] - crop_size
    w_border = img.shape[1] - crop_size
    if v_border < 0:
        v_half = (-v_border + 1) // 2
        img = np.pad(img, ((v_half, v_half), (0, 0), (0, 0)), mode='edge')
        src_landmarks += np.array([0, v_half])
        move += np.array([0, v_half])
    if w_border < 0:
        w_half = (-w_border + 1) // 2
        img = np.pad(img, ((0, 0), (w_half, w_half), (0, 0)), mode='edge')
        src_landmarks += np.array([w_half, 0])
        move += np.array([w_half, 0])

    # estimate transform matrix
    mean_landmarks -= np.array([mean_landmarks[0, :] + mean_landmarks[1, :]
                                ]) / 2.0  # middle point of eyes as center
    trg_landmarks = mean_landmarks * (crop_size * face_factor *
                                      landmark_factor) + move
    tform = cv2.estimateAffinePartial2D(trg_landmarks,
                                        src_landmarks,
                                        ransacReprojThreshold=np.inf)[0]

    # fix the translation to match the middle point of eyes
    trg_mid = (trg_landmarks[0, :] + trg_landmarks[1, :]) / 2.0
    src_mid = (src_landmarks[0, :] + src_landmarks[1, :]) / 2.0
    new_trg_mid = cv2.transform(np.array([[trg_mid]]), tform)[0, 0]
    tform[:, 2] += src_mid - new_trg_mid

    # warp image by given transform
    output_shape = (crop_size // 2 + move[1] + 1, crop_size // 2 + move[0] + 1)
    img_align = cv2.warpAffine(img,
                               tform,
                               output_shape[::-1],
                               flags=cv2.WARP_INVERSE_MAP + cv2.INTER_CUBIC,
                               borderMode=cv2.BORDER_REPLICATE)

    # crop
    img_crop = img_align[-crop_size:, -crop_size:]

    return img_crop
Example #46
def Normalization(w, h, allPoints, allPoints_json, images):
    imagesNorm = []
    pointsNorm = []  #the normalized points and images

    boundaryPts = np.array([
        (0, 0),
        (w / 2, 0),
        (w - 1, 0),
        (w - 1, h / 2),
        (w - 1, h - 1),
        (w / 2, h - 1),
        (0, h - 1),
        (0, h / 2)
    ]
    )

    pointsAvg = np.array([[0, 0]] * len(allPoints[0]), dtype=np.float64)  # an array representing the final average landmarks

    eyecorner_chin_Dst = [
        [0.3 * w, h / 2],
        [0.7 * w, h / 2],
        [0.5 * w, h * 0.9]
    ] #the final locations of eye conners and chin


    for i, image in enumerate(images):

        points = allPoints[i]
        #the two eye corners from the original image
        eyecorner_chin_Src = [allPoints_json[i]['left_eye_left_corner'], allPoints_json[i]['right_eye_right_corner'], allPoints_json[i]['contour_chin']]
        eyecorner_chin_Src = [[p['x'], p['y']] for p in eyecorner_chin_Src]



        tform, img = utils.applyAffineTransform(image, eyecorner_chin_Src, eyecorner_chin_Dst, (w, h))  # transform the original image


        points = np.reshape(cv2.transform(np.reshape(np.array(points), (-1, 1, 2)), tform), (-1, 2))  # transform the points
        points = np.maximum(points, 0)
        points = np.minimum(points, [w - 1, h - 1])


        pointsAvg += points  # contribute to the average points
        pointsNorm.append(np.append(points, boundaryPts, axis = 0))

        imagesNorm.append(img)

    pointsAvg = pointsAvg / len(images)


    return np.append(pointsAvg, boundaryPts, axis = 0), pointsNorm, imagesNorm
Example #47
    def compute_location(self, kp1, des1, kp2, des2):
        """
        compute the global location of center of current image
        :param kp1: captured keyPoints
        :param des1: captured descriptions
        :param kp2: map keyPoints
        :param des2: map descriptions
        :return: global pose
        """
        matches = self.matcher.knnMatch(des1, des2, k=2)
        good = []
        pose = None

        for match in matches:
            if len(match) > 1 and match[
                    0].distance < MATCH_RATIO * match[1].distance:
                good.append(match[0])

        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt
                                  for m in good]).reshape(-1, 1, 2)
            transform = cv2.estimateRigidTransform(src_pts, dst_pts, False)
            if transform is not None:
                transformed_center = cv2.transform(
                    CAMERA_CENTER, transform)  # get global pixel
                transformed_center = [
                    transformed_center[0][0][0] /
                    METER_TO_PIXEL,  # map to global pose
                    (MAP_PIXEL_HEIGHT - 1 - transformed_center[0][0][1]) /
                    METER_TO_PIXEL
                ]
                yaw = np.arctan2(transform[1, 0],
                                 transform[0, 0])  # get global heading

                # correct the pose if the drone is not level
                z = math.sqrt(self.z**2 / (1 + math.tan(self.angle_x)**2 +
                                           math.tan(self.angle_y)**2))
                offset_x = np.tan(self.angle_x) * z
                offset_y = np.tan(self.angle_y) * z
                global_offset_x = math.cos(yaw) * offset_x + math.sin(
                    yaw) * offset_y
                global_offset_y = math.sin(yaw) * offset_x + math.cos(
                    yaw) * offset_y
                pose = [
                    transformed_center[0] + global_offset_x,
                    transformed_center[1] + global_offset_y, z, yaw
                ]

        return pose, len(good)
Example #48
def match_feature(templ,
                  haystack,
                  *,
                  min_match=10,
                  templ_mask=None,
                  haystack_mask=None,
                  limited_transform=False) -> FeatureMatchingResult:
    templ = np.asarray(templ.convert('L'))
    haystack = np.asarray(haystack.convert('L'))

    detector = cv.SIFT_create()
    kp1, des1 = detector.detectAndCompute(templ, templ_mask)
    kp2, des2 = detector.detectAndCompute(haystack, haystack_mask)

    # index_params = dict(algorithm=6,
    #                     table_number=6,
    #                     key_size=12,
    #                     multi_probe_level=2)
    index_params = dict(algorithm=0, trees=5)  # algorithm=FLANN_INDEX_KDTREE
    search_params = {}
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    good = []
    for group in matches:
        if len(group) >= 2 and group[0].distance < 0.75 * group[1].distance:
            good.append(group[0])

    result = FeatureMatchingResult(len(kp1), len(good))

    if len(good) >= min_match:
        src_pts = np.float32([kp1[m.queryIdx].pt
                              for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt
                              for m in good]).reshape(-1, 1, 2)

        if limited_transform:
            M, _ = cv.estimateAffinePartial2D(src_pts, dst_pts)
        else:
            M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 4.0)

        h, w = templ.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                          [w - 1, 0]]).reshape(-1, 1, 2)
        if limited_transform:
            dst = cv.transform(pts, M)
        else:
            dst = cv.perspectiveTransform(pts, M)
        result.M = M
        result.template_corners = dst.reshape(-1, 2)
        # img2 = cv.polylines(haystack, [np.int32(dst)], True, 0, 2, cv.LINE_AA)
    return result
Example #49
def estimateRigidTransform(srcPts, dstPts, inlierThreshold, outlierCoordScale = 1.0, fullAffine = False):
    srcPts = np.float32(srcPts).reshape(-1,1,2)
    dstPts = np.float32(dstPts).reshape(-1,1,2)
    M = cv2.estimateRigidTransform(srcPts, dstPts, fullAffine = fullAffine)
    print(M)
    if M is None:
        inlierMask = np.zeros(len(srcPts))
    else:
        inlierMask = []
        mappedPts = cv2.transform(srcPts, M)
        for mappedPt,dstPt in zip(mappedPts, dstPts):
            dist = np.linalg.norm(mappedPt/outlierCoordScale - dstPt/outlierCoordScale)
            inlierMask.append(int(dist < inlierThreshold))
        inlierMask = np.array(inlierMask)
    return M, inlierMask
Example #50
    def GET(self):
        image_path = os.getcwd() + '/images/'
        img_list = os.listdir(image_path)
        img = cv2.imread(image_path + img_list[0], cv2.IMREAD_COLOR)

        kernel = np.array(
            ([0.272, 0.534, 0.131],
             [0.349, 0.686, 0.168],
             [0.393, 0.769, 0.189])
        )

        result = cv2.transform(img, kernel)

        _, data = cv2.imencode('.jpg', result)
        jpeg_base64 = base64.b64encode(data.tobytes())
        return jpeg_base64
Example #51
    def apply_new_face(self, image, new_face, image_mask, mat, image_size, size):
        base_image = numpy.copy( image )
        new_image = numpy.copy( image )

        cv2.warpAffine( new_face, mat, image_size, new_image, cv2.WARP_INVERSE_MAP, cv2.BORDER_TRANSPARENT )

        outimage = None
        if self.seamless_clone:
            masky,maskx = cv2.transform( numpy.array([ size/2,size/2 ]).reshape(1,1,2) ,cv2.invertAffineTransform(mat) ).reshape(2).astype(int)
            outimage = cv2.seamlessClone(new_image.astype(numpy.uint8),base_image.astype(numpy.uint8),(image_mask*255).astype(numpy.uint8),(masky,maskx) , cv2.NORMAL_CLONE )
        else:
            foreground = cv2.multiply(image_mask, new_image.astype(float))
            background = cv2.multiply(1.0 - image_mask, base_image.astype(float))
            outimage = cv2.add(foreground, background)

        return outimage
Example #52
def get_align_mat(face, size, should_align_eyes):
    """ Return the alignment Matrix """
    logger.trace("size: %s, should_align_eyes: %s", size, should_align_eyes)
    mat_umeyama = umeyama(np.array(face.landmarks_as_xy[17:]), True)[0:2]

    if should_align_eyes is False:
        return mat_umeyama

    mat_umeyama = mat_umeyama * size

    # Convert to matrix
    landmarks = np.matrix(face.landmarks_as_xy)

    # cv2 expects points to be in the form
    # np.array([ [[x1, y1]], [[x2, y2]], ... ]), we'll expand the dim
    landmarks = np.expand_dims(landmarks, axis=1)

    # Align the landmarks using umeyama
    umeyama_landmarks = cv2.transform(  # pylint: disable=no-member
        landmarks,
        mat_umeyama,
        landmarks.shape)

    # Determine a rotation matrix to align eyes horizontally
    mat_align_eyes = func_align_eyes(umeyama_landmarks, size)

    # Extend the 2x3 transform matrices to 3x3 so we can multiply them
    # and combine them as one
    mat_umeyama = np.matrix(mat_umeyama)
    mat_umeyama.resize((3, 3))
    mat_align_eyes = np.matrix(mat_align_eyes)
    mat_align_eyes.resize((3, 3))
    mat_umeyama[2] = mat_align_eyes[2] = [0, 0, 1]

    # Combine the umeyama transform with the extra rotation matrix
    transform_mat = mat_align_eyes * mat_umeyama

    # Remove the extra row added, shape needs to be 2x3
    transform_mat = np.delete(transform_mat, 2, 0)
    transform_mat = transform_mat / size
    logger.trace("Returning: %s", transform_mat)
    return transform_mat
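The 2x3-to-3x3 trick used above, shown in isolation with made-up matrices: append a [0, 0, 1] row to each affine, multiply, then drop the extra row to get the combined 2x3 transform.

import cv2
import numpy as np

A = np.array([[1.0, 0.0, 5.0], [0.0, 1.0, 3.0]])    # translate by (5, 3)
B = cv2.getRotationMatrix2D((0.0, 0.0), 90, 1.0)    # rotate 90 degrees about the origin
combined = np.dot(np.vstack([B, [0, 0, 1]]), np.vstack([A, [0, 0, 1]]))[:2]
print(cv2.transform(np.float32([[[1, 2]]]), combined))   # A applied first, then B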
Example #53
    def get_feature_mask(self, aligned_landmarks_68, size, padding=0, dilation=30):
        scale = size - 2*padding
        translation = padding
        pad_mat = np.matrix([[scale, 0.0, translation], [0.0, scale, translation]])
        aligned_landmarks_68 = np.expand_dims(aligned_landmarks_68, axis=1)
        aligned_landmarks_68 = cv2.transform(aligned_landmarks_68, pad_mat, aligned_landmarks_68.shape)
        aligned_landmarks_68 = np.squeeze(aligned_landmarks_68)

        (lStart, lEnd) = FACIAL_LANDMARKS_IDXS["left_eye"]
        (rStart, rEnd) = FACIAL_LANDMARKS_IDXS["right_eye"]
        (mStart, mEnd) = FACIAL_LANDMARKS_IDXS["mouth"]
        (nStart, nEnd) = FACIAL_LANDMARKS_IDXS["nose"]
        (lbStart, lbEnd) = FACIAL_LANDMARKS_IDXS["left_eyebrow"]
        (rbStart, rbEnd) = FACIAL_LANDMARKS_IDXS["right_eyebrow"]
        (cStart, cEnd) = FACIAL_LANDMARKS_IDXS["chin"]

        l_eye_points = aligned_landmarks_68[lStart:lEnd].tolist()
        l_brow_points = aligned_landmarks_68[lbStart:lbEnd].tolist()
        r_eye_points = aligned_landmarks_68[rStart:rEnd].tolist()
        r_brow_points = aligned_landmarks_68[rbStart:rbEnd].tolist()
        nose_points = aligned_landmarks_68[nStart:nEnd].tolist()
        chin_points = aligned_landmarks_68[cStart:cEnd].tolist()
        mouth_points = aligned_landmarks_68[mStart:mEnd].tolist()
        l_eye_points = l_eye_points + l_brow_points
        r_eye_points = r_eye_points + r_brow_points
        mouth_points = mouth_points + nose_points + chin_points

        l_eye_hull = cv2.convexHull(np.array(l_eye_points).reshape((-1, 2)).astype(int)).flatten().reshape((-1, 2))
        r_eye_hull = cv2.convexHull(np.array(r_eye_points).reshape((-1, 2)).astype(int)).flatten().reshape((-1, 2))
        mouth_hull = cv2.convexHull(np.array(mouth_points).reshape((-1, 2)).astype(int)).flatten().reshape((-1, 2))

        mask = np.zeros((size, size, 3), dtype=float)
        cv2.fillConvexPoly(mask, l_eye_hull, (1,1,1))
        cv2.fillConvexPoly(mask, r_eye_hull, (1,1,1))
        cv2.fillConvexPoly(mask, mouth_hull, (1,1,1))

        if dilation > 0:
            kernel = np.ones((dilation, dilation), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=1)

        return mask
Example #54
def transform_cnt(img, cnt, trfm, anchor_im='centroid', anchor_bg='center'):
    """
    Place img on the background canvas, by aligning the anchor_im position on the img 
    with the anchor_bg position of the background canvas, and then apply trfm 
    on the image (with rotation centered around centroid of the image). Return the 
    canvas with transformed image.
    """
    if cnt is None:
        img = pad_image_cnt(img, cnt, anchor_im, anchor_bg)
    else:
        img, cnt = pad_image_cnt(img, cnt, anchor_im, anchor_bg)
    im_centroid = get_centroid(img)
    A = cv2.getRotationMatrix2D(im_centroid, np.rad2deg(trfm[2]), 1)
    A[0, 2] = A[0, 2] + trfm[0]
    A[1, 2] = A[1, 2] + trfm[1]
    # warpAffine expects dsize as (width, height), so swap the (rows, cols) order
    img_warp = cv2.warpAffine(img, A, (img.shape[1], img.shape[0]))
    if cnt is not None:
        cnt = cv2.transform(np.resize(cnt, (cnt.shape[0], 1, 2)), A)
        cnt = np.squeeze(cnt)
        return img_warp, cnt
    return img_warp
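
# `pad_image_cnt` and `get_centroid` are defined elsewhere in the original
# project. A plausible sketch of the centroid helper, assuming a single-channel
# or BGR image and using image moments (an assumption, not the project's code):
import cv2

def get_centroid_sketch(img):
    gray = img if img.ndim == 2 else cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    moments = cv2.moments(gray)
    if moments["m00"] == 0:
        # Fall back to the geometric centre for an all-black image
        return (img.shape[1] / 2.0, img.shape[0] / 2.0)
    # (x, y) order, matching what cv2.getRotationMatrix2D expects as a centre
    return (moments["m10"] / moments["m00"], moments["m01"] / moments["m00"])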
Ejemplo n.º 55
        points1 = allPoints[i]

        # Corners of the eye in input image
        eyecornerSrc = [allPoints[i][36], allPoints[i][45]]

        # Compute similarity transform
        tform = similarityTransform(eyecornerSrc, eyecornerDst)

        # Apply similarity transformation
        img = cv2.warpAffine(images[i], tform, (w, h))

        # Apply similarity transform on points
        points2 = np.reshape(np.array(points1), (68, 1, 2))

        points = cv2.transform(points2, tform)

        points = np.float32(np.reshape(points, (68, 2)))

        # Append boundary points. Will be used in Delaunay Triangulation
        points = np.append(points, boundaryPts, axis=0)

        # Calculate location of average landmark points.
        pointsAvg = pointsAvg + points / numImages

        pointsNorm.append(points)
        imagesNorm.append(img)
    

    
    # Delaunay triangulation
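
# `similarityTransform` is not shown in this excerpt. One way such a helper can
# be written is to synthesise a third point forming an equilateral triangle
# with each eye-corner pair and then fit a partial 2D affine; the sketch below
# is an assumption about its implementation, not the original code:
import cv2
import numpy as np

def similarity_transform_sketch(in_points, out_points):
    s60, c60 = np.sin(np.deg2rad(60)), np.cos(np.deg2rad(60))
    inp, outp = list(map(list, in_points)), list(map(list, out_points))

    # Third point of an equilateral triangle on the source pair
    inp.append([c60 * (inp[0][0] - inp[1][0]) - s60 * (inp[0][1] - inp[1][1]) + inp[1][0],
                s60 * (inp[0][0] - inp[1][0]) + c60 * (inp[0][1] - inp[1][1]) + inp[1][1]])
    # ... and on the destination pair
    outp.append([c60 * (outp[0][0] - outp[1][0]) - s60 * (outp[0][1] - outp[1][1]) + outp[1][0],
                 s60 * (outp[0][0] - outp[1][0]) + c60 * (outp[0][1] - outp[1][1]) + outp[1][1]])

    src = np.array(inp, dtype=np.float32).reshape(-1, 1, 2)
    dst = np.array(outp, dtype=np.float32).reshape(-1, 1, 2)
    tform, _ = cv2.estimateAffinePartial2D(src, dst)
    return tform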
Ejemplo n.º 56
def icp(a, b, init_pose=(0,0,0), no_iterations = 13):
    '''
    The Iterative Closest Point estimator.
    Takes two cloudpoints a[x,y], b[x,y], an initial estimation of
    their relative pose and the number of iterations
    Returns the affine transform that transforms
    the cloudpoint a to the cloudpoint b.
    Note:
        (1) This method works for cloudpoints with minor
        transformations. Thus, the result depends greatly on
        the initial pose estimation.
        (2) A large number of iterations does not necessarily
        ensure convergence. Contrarily, most of the time it
        produces worse results.
    '''

    src = np.array([a.T], copy=True).astype(np.float32)
    dst = np.array([b.T], copy=True).astype(np.float32)

    #Initialise with the initial pose estimation
    Tr = np.array([[np.cos(init_pose[2]),-np.sin(init_pose[2]),init_pose[0]],
                   [np.sin(init_pose[2]), np.cos(init_pose[2]),init_pose[1]],
                   [0,                    0,                   1          ]])
    #import pdb; pdb.set_trace()
    src = cv2.transform(src, Tr[0:2])
    #empty = np.zeros((730, 1300, 3))
    for i in range(no_iterations):
        empty = np.zeros((720, 1280, 3))
        #Find the nearest neighbours between the current source and the
        #destination cloudpoint
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(dst[0])
        distances, indices = nbrs.kneighbors(src[0])
        qualified = (distances<5).ravel()
        indices = indices[qualified, :]
        
        #Compute the transformation between the current source
        #and destination cloudpoint
        T = cv2.estimateRigidTransform(src[:, qualified, :], dst[0, indices.T], True)
        #T = cv2.getPerspectiveTransform(src, dst[0, indices.T])
        #Transform the previous source and update the
        #current source cloudpoint
        src = cv2.transform(src, T)
        
        #empty[a[0,:], a[1,:], :] = (255, 0, 0)
        empty[b[0,:], b[1,:], :] = (255, 0, 0)
        idx = src[:, qualified, :].astype(int)
        idx2 = dst[0, indices.T].astype(int)
        
        #import pdb; pdb.set_trace()
        for (y, x) in idx[0, :]:
            cv2.circle(empty, (x, y), 8, (0,255,0), 1)
        for (y, x) in idx2[0, :]:
            cv2.circle(empty, (x, y), 3, (0,0,255), 1)

        cv2.imshow('icp', empty)
        k = cv2.waitKey(50) & 0xFF
        if k == 27: break
        #Save the transformation from the actual source cloudpoint
        #to the destination
        Tr = np.dot(Tr, np.vstack((T,[0,0,1])))
        print(T)
        #import pdb; pdb.set_trace()
    #import pdb; pdb.set_trace()
    return Tr, (0, Tr[0,2], Tr[1, 2])
Ejemplo n.º 57

if __name__ == '__main__':
    ang = np.linspace(-np.pi/2, np.pi/2, 320)
    a = np.array([ang, np.sin(ang)])
    th = np.pi/2
    rot = np.array([[np.cos(th), -np.sin(th)],[np.sin(th), np.cos(th)]])
    b = np.dot(rot, a) + np.array([[0.2], [0.3]])

    #Run the icp
    M2 = icp(a, b, [0.1,  0.33, np.pi/2.2], 30)

    #Plot the result
    src = np.array([a.T]).astype(np.float32)
    res = cv2.transform(src, M2)
    plt.figure()
    plt.plot(b[0],b[1])
    plt.plot(res[0].T[0], res[0].T[1], 'r.')
    plt.plot(a[0], a[1])
    plt.show()
Ejemplo n.º 58
def rotate_landmarks(face, rotation_matrix):
    # pylint: disable=c-extension-no-member
    """ Rotate the landmarks and bounding box for faces
        found in rotated images.
        Pass in a DetectedFace object, Alignments dict or BoundingBox"""
    logger.trace("Rotating landmarks: (rotation_matrix: %s, type(face): %s",
                 rotation_matrix, type(face))
    if isinstance(face, DetectedFace):
        bounding_box = [[face.x, face.y],
                        [face.x + face.w, face.y],
                        [face.x + face.w, face.y + face.h],
                        [face.x, face.y + face.h]]
        landmarks = face.landmarksXY

    elif isinstance(face, dict):
        bounding_box = [[face.get("x", 0), face.get("y", 0)],
                        [face.get("x", 0) + face.get("w", 0),
                         face.get("y", 0)],
                        [face.get("x", 0) + face.get("w", 0),
                         face.get("y", 0) + face.get("h", 0)],
                        [face.get("x", 0),
                         face.get("y", 0) + face.get("h", 0)]]
        landmarks = face.get("landmarksXY", list())

    elif isinstance(face, BoundingBox):
        bounding_box = [[face.left, face.top],
                        [face.right, face.top],
                        [face.right, face.bottom],
                        [face.left, face.bottom]]
        landmarks = list()
    else:
        raise ValueError("Unsupported face type")

    logger.trace("Original landmarks: %s", landmarks)

    rotation_matrix = cv2.invertAffineTransform(  # pylint: disable=no-member
        rotation_matrix)
    rotated = list()
    for item in (bounding_box, landmarks):
        if not item:
            continue
        points = np.array(item, np.int32)
        points = np.expand_dims(points, axis=0)
        transformed = cv2.transform(points,  # pylint: disable=no-member
                                    rotation_matrix).astype(np.int32)
        rotated.append(transformed.squeeze())

    # Bounding box should follow x, y planes, so get min/max
    # for non-90 degree rotations
    pt_x = min([pnt[0] for pnt in rotated[0]])
    pt_y = min([pnt[1] for pnt in rotated[0]])
    pt_x1 = max([pnt[0] for pnt in rotated[0]])
    pt_y1 = max([pnt[1] for pnt in rotated[0]])

    if isinstance(face, DetectedFace):
        face.x = int(pt_x)
        face.y = int(pt_y)
        face.w = int(pt_x1 - pt_x)
        face.h = int(pt_y1 - pt_y)
        face.r = 0
        if len(rotated) > 1:
            rotated_landmarks = [tuple(point) for point in rotated[1].tolist()]
            face.landmarksXY = rotated_landmarks
    elif isinstance(face, dict):
        face["x"] = int(pt_x)
        face["y"] = int(pt_y)
        face["w"] = int(pt_x1 - pt_x)
        face["h"] = int(pt_y1 - pt_y)
        face["r"] = 0
        if len(rotated) > 1:
            rotated_landmarks = [tuple(point) for point in rotated[1].tolist()]
            face["landmarksXY"] = rotated_landmarks
    else:
        rotated_landmarks = BoundingBox(pt_x, pt_y, pt_x1, pt_y1)
        face = rotated_landmarks

    logger.trace("Rotated landmarks: %s", rotated_landmarks)
    return face
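
# A hypothetical usage sketch (not from the original project): rotate a frame
# with cv2.getRotationMatrix2D, run detection on the rotated frame, then map
# the detected face back into the original frame with rotate_landmarks.
import cv2
import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # placeholder frame
height, width = frame.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((width / 2, height / 2), 90, 1.0)
rotated_frame = cv2.warpAffine(frame, rotation_matrix, (width, height))
# detected = detect_faces(rotated_frame)          # hypothetical detector
# face = rotate_landmarks(detected, rotation_matrix)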
Ejemplo n.º 59
def red_filter(cv_pic):
    # Zero out the first two channels and keep the third, which is the red
    # channel assuming the usual BGR ordering of images loaded with OpenCV
    m_red = numpy.asarray([[0.0, 0.0, 0.0],
                           [0.0, 0.0, 0.0],
                           [0.0, 0.0, 1.0]])
    red = cv2.transform(cv_pic, m_red)
    return red
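
# A minimal usage sketch, assuming a standard BGR image loaded with cv2.imread
# (the file names below are placeholders, not from the original code):
import cv2

img = cv2.imread("input.jpg")
if img is not None:
    red_only = red_filter(img)
    cv2.imwrite("red_only.jpg", red_only)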
Ejemplo n.º 60
def green_filter(cv_pic):
    m_green = numpy.asarray([[0.0, 0.0, 0.0],
                             [0.0, 1.0, 0.0],
                             [0.0, 0.0, 0.0]])
    green = cv2.transform(cv_pic, m_green)
    return green