Code Example #1
def cut_from_rotated_rect(img_data, rot_box, expand_ratio, newH, contour, gt_pt_array):
    # extend the bbox width by a percentage
    if rot_box[1][0] > rot_box[1][1]:
        rb1 = (rot_box[1][0] * expand_ratio, rot_box[1][1] * 1.1)
    else:
        rb1 = (rot_box[1][0] * 1.1, rot_box[1][1] * expand_ratio)
    rot_box = (rot_box[0], rb1, rot_box[2])
    # Get the 'contour' points
    rot_box_pts = cv2.boxPoints(rot_box).reshape((4, 1, 2))
    # Get the box width and height
    rMinD = min(rot_box[1])
    rMaxD = max(rot_box[1])
    # New width; the height is constant, set above
    newW = float(newH) / rMinD * rMaxD
    # 2,1,0 so the whale is not upside down
    pt_orig = rot_box_pts[[2, 1, 0], 0, :]

    # find the coordinates of the 2nd point
    def get_dist(pt1, pt2):
        return math.sqrt(math.pow((pt1[0] - pt2[0]), 2) + math.pow((pt1[1] - pt2[1]), 2))

    d1 = get_dist(pt_orig[0], pt_orig[1])
    d2 = get_dist(pt_orig[1], pt_orig[2])
    if d1 < d2:
        mid_point = [0, newH]
    else:
        mid_point = [newW, 0]

    # create the destination coordinates
    pt_dest = np.array([[0, 0], mid_point, [newW, newH]]).astype(np.float32)
    inv_transf = cv2.getAffineTransform(pt_dest, pt_orig)
    transf = cv2.getAffineTransform(pt_orig, pt_dest)
    x1, y1 = np.meshgrid(np.arange(newW), np.arange(newH), indexing="xy")
    coord_trans = np.dstack([x1, y1])
    coord_trans2 = cv2.transform(coord_trans, inv_transf).astype(np.float32)
    transf_img = cv2.remap(img_data, coord_trans2, None, interpolation=cv2.INTER_CUBIC)
    # Transform the contour and the 2 GT points
    if contour is not None:
        transf_contour = cv2.transform(contour, transf).astype(np.int32)
    else:
        transf_contour = None

    if gt_pt_array is not None:
        transf_gt_pts = cv2.transform(gt_pt_array, transf).astype(np.int32).reshape((2, 2)).tolist()
    else:
        transf_gt_pts = None

    return transf_img, rot_box_pts, transf_contour, transf_gt_pts
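A minimal, self-contained sketch of the three-point affine estimation the example above relies on; the image and triangles are illustrative assumptions. Warping with the forward matrix, as below, yields the same crop the example obtains by remapping through the inverted matrix:

import cv2
import numpy as np

img = np.random.randint(0, 255, (120, 200, 3), dtype=np.uint8)
pt_orig = np.float32([[20, 10], [180, 30], [160, 100]])  # source triangle
pt_dest = np.float32([[0, 0], [160, 0], [160, 90]])      # destination triangle
transf = cv2.getAffineTransform(pt_orig, pt_dest)        # forward 2x3 matrix
warped = cv2.warpAffine(img, transf, (161, 91), flags=cv2.INTER_CUBIC)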
Code Example #2
File: util.py Project: dzungcamlang/Traffic-Signs
def transform_image(img, ang_range, shear_range, trans_range):
    '''
    NOTE: Some parts of this method were borrowed from:
    https://nbviewer.jupyter.org/github/vxy10/SCND_notebooks/blob/master/preprocessing_stuff/img_transform_NB.ipynb
    credit should go to the original author
    '''
    # Rotation
    ang_rot = np.random.uniform(0, ang_range) - ang_range / 2  # a lone positional arg to np.random.uniform sets low, not high
    rows, cols, ch = img.shape
    Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)

    # Translation
    tr_x = trans_range * np.random.uniform() - trans_range / 2
    tr_y = trans_range * np.random.uniform() - trans_range / 2
    Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])

    # Shear
    pts1 = np.float32([[5, 5], [20, 5], [5, 20]])

    pt1 = 5 + shear_range * np.random.uniform() - shear_range / 2
    pt2 = 20 + shear_range * np.random.uniform() - shear_range / 2

    pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])

    shear_M = cv2.getAffineTransform(pts1, pts2)

    img = cv2.warpAffine(img, Rot_M, (cols, rows))
    img = cv2.warpAffine(img, Trans_M, (cols, rows))
    img = cv2.warpAffine(img, shear_M, (cols, rows))

    return img
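A hedged usage sketch for transform_image; the image size and augmentation ranges below are assumptions:

import cv2
import numpy as np

img = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8)
augmented = transform_image(img, ang_range=20, shear_range=10, trans_range=5)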
Code Example #3
File: mesh.py Project: Tiotao/FaceMorpher
def warpImage(original, feats, tri, img_path):
	image = cv2.imread(img_path)
	white = (255, 255, 255)
	rows,cols,ch = image.shape
	masked_image = np.zeros(image.shape, dtype=np.uint8)
	for t in tri:
		old_a = original[t[0]]
		old_b = original[t[1]]
		old_c = original[t[2]]
		new_a = feats[t[0]]
		new_b = feats[t[1]]
		new_c = feats[t[2]]
		pts1 = np.float32([old_a,old_b,old_c])
		pts2 = np.float32([new_a,new_b,new_c])
		M = cv2.getAffineTransform(pts1,pts2)
		dst = cv2.warpAffine(image,M,(cols,rows))
		# cv2.imshow('masked image', dst)
		mask = np.zeros(image.shape, dtype=np.uint8)
		roi_corners = np.array([[new_a, new_b, new_c]], dtype=np.int32)
		cv2.fillPoly(mask, roi_corners, white)
		masked = cv2.bitwise_and(dst, mask)
		masked_image = cv2.bitwise_or(masked_image, masked)
	# cv2.imshow('masked image', masked_image)
	# cv2.waitKey()
	# cv2.destroyAllWindows()
	return masked_image
Code Example #4
File: custom_data_aug.py Project: legendhua/SegCaps
def elastic_transform(image, alpha=2000, sigma=40, alpha_affine=40, random_state=None):
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]

    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    for i in range(shape[2]):
        image[:,:,i] = cv2.warpAffine(image[:,:,i], M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
    image = image.reshape(shape)

    blur_size = int(4*sigma) | 1

    dx = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha
    dy = cv2.GaussianBlur((random_state.rand(*shape_size) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma) * alpha

    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))

    def_img = np.zeros_like(image)
    for i in range(shape[2]):
        def_img[:,:,i] = map_coordinates(image[:,:,i], indices, order=1).reshape(shape_size)

    return def_img
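A hedged usage sketch; this variant assumes a channel-last H x W x C array, and map_coordinates is expected to come from scipy.ndimage:

import cv2
import numpy as np
from scipy.ndimage import map_coordinates  # used by elastic_transform above

img = np.random.rand(128, 128, 3).astype(np.float32)
deformed = elastic_transform(img.copy(), alpha=2000, sigma=40, alpha_affine=40)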
Code Example #5
File: normalizers.py Project: takiyu/DeepPose
def calc_cropping_matrix(width, height, center, joint, sigma=1.0):
    jmap = datasets.JOINT_MAP
    if 'rhip' in jmap and 'lhip' in jmap:
        diam1 = np.sqrt(np.sum((joint[jmap['lsho']] - joint[jmap['rhip']]) ** 2))
        diam2 = np.sqrt(np.sum((joint[jmap['rsho']] - joint[jmap['lhip']]) ** 2))
        diam = (diam1 + diam2) / 2.0
    else:
        diam = np.sqrt(np.sum((joint[jmap['lsho']] - joint[jmap['rsho']]) ** 2))
        diam *= 2.0
    diam *= sigma
    half_diam = diam / 2.0

    # affine transform
    src_triangle = np.array([
        [center[0] - half_diam, center[1] - half_diam],
        [center[0] + half_diam, center[1] - half_diam],
        [center[0] - half_diam, center[1] + half_diam],
    ], dtype=np.float32)
    dst_triangle = np.array([
        [0, 0],
        [width, 0],
        [0, height],
    ], dtype=np.float32)
    affine_mat = cv2.getAffineTransform(src_triangle, dst_triangle)  # 2x3
    affine_mat = np.vstack((affine_mat, [0, 0, 1]))  # 3x3

    return affine_mat
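A hedged sketch of applying the returned 3x3 matrix to a point in homogeneous coordinates; the triangles and the point are illustrative assumptions:

import cv2
import numpy as np

src = np.float32([[10, 10], [110, 10], [10, 110]])
dst = np.float32([[0, 0], [96, 0], [0, 96]])
affine_mat = np.vstack((cv2.getAffineTransform(src, dst), [0, 0, 1]))  # 3x3
pt = np.array([60.0, 60.0, 1.0])   # homogeneous (x, y, 1)
mapped_xy = (affine_mat @ pt)[:2]  # transformed (x, y)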
Code Example #6
File: 4milestone5.py Project: zorawar87/SCSI15_IPCV
def TransformPos():
	# `ii` appears to alias cv2; IMG, w and h are module-level globals
	origP = numpy.float32([[40, 40], [160, 40], [40, 160]])
	newP = numpy.float32([[60, 60], [180, 60], [60, 195]])

	mat = ii.getAffineTransform(origP, newP)
	warpImg = ii.warpAffine(IMG, mat, (w, h))
	ii.imshow("image", warpImg)
Code Example #7
def modify_affine(new, old, thr, recur):
    inlier = range(0,len(old))
    last_inlier = []
    dist_container = []
    count = 0
    M = None
    while len(inlier) != len(last_inlier):  # and (count < recur):
        if len(inlier) < 0.8*len(old):
            #print len(inlier)
            thr = np.mean(dist_container) - np.std(dist_container)
            last_inlier = range(0,len(old))
            #print thr
        else:
            last_inlier = inlier
        inlier = []
        dist_container = []
        seeds = list(range(0, len(last_inlier)))  # list() so random.shuffle works on Python 3
        random.shuffle(seeds)
        seed = seeds[0:3]
        pst_new = np.array([new[last_inlier[j]] for j in seed]).astype(np.float32)
        pst_old = np.array([old[last_inlier[j]] for j in seed]).astype(np.float32)
        M = cv2.getAffineTransform(pst_new, pst_old)  # maps points in `new` onto `old`
        count = count + 1
        for k in range(0, len(last_inlier)):
            # M maps new -> old, so transform the `new` point and compare it against `old`
            fone = np.hstack((new[last_inlier[k]], 1))
            Tfone = np.transpose(fone)
            fcomp = np.dot(M, Tfone)
            dist = np.linalg.norm(old[last_inlier[k]] - fcomp[0:2])
            dist_container.append(dist)
            if dist < thr:
               inlier.append(last_inlier[k])
    return M
Code Example #8
def faceclone(src_name, dst_name):
    src_img = cv2.imread(src_name)
    dst_img = cv2.imread(dst_name)

    src_rst = api.detection.detect(img = File(src_name), attribute='pose')
    src_img_width   = src_rst['img_width']
    src_img_height  = src_rst['img_height']
    src_face        = src_rst['face'][0]

    dst_rst = api.detection.detect(img = File(dst_name), attribute='pose')
    dst_img_width   = dst_rst['img_width']
    dst_img_height  = dst_rst['img_height']
    dst_face        = dst_rst['face'][0]

    ss = np.array(get_feature_points(src_face, src_img_width, src_img_height), dtype=np.float32)
    ps = np.array(get_feature_points(dst_face, dst_img_width, dst_img_height), dtype=np.float32)
    map_matrix = cv2.getAffineTransform(ps, ss)

    #dsize = (300,300)
    map_result = cv2.warpAffine(dst_img, map_matrix, dsize=(src_img_width,src_img_height))
    
    extract_mask, center = contour.extract_face_mask(src_face['face_id'], src_img_width, src_img_height, src_name)
    # merge 
    ## first blending the border
    extract_alpha = contour.extract_face_alpha(src_face['face_id'], src_img_width, src_img_height, src_name)
    center = (map_result.shape[1] // 2, map_result.shape[0] // 2)  # seamlessClone expects an integer (x, y) point
    map_result = cv2.seamlessClone(src_img, map_result, extract_mask, center, flags=cv2.NORMAL_CLONE)

    imap_matrix = cv2.invertAffineTransform(map_matrix)
    final = cv2.warpAffine(map_result, imap_matrix, dsize=(dst_img.shape[1], dst_img.shape[0]))  # dsize is (width, height)
    return final
Code Example #9
    def transform_map(self):
        self.triangulate()

        slam_map = cv2.imread(self.img_2, 0)
        rows, cols = cv2.imread(self.img_1, 0).shape
        output = np.zeros((rows, cols), np.uint8)
        slam_nodes = self.nodes("register/slam.1.node")
        semantic_nodes = self.nodes("register/semantic.1.node")
        # From the ele file, color triangles
        first_line = True
        with open("register/slam.1.ele", 'r') as f:
            for line in f:
                if first_line:
                    # Do nothing
                    first_line = False 
                elif '#' not in line:
                    # This line is not a comment
                    s = line.split()
                    node_index_1 = int(s[1])
                    node_index_2 = int(s[2])
                    node_index_3 = int(s[3])
                    slam_pts = [slam_nodes[node_index_1], slam_nodes[node_index_2], slam_nodes[node_index_3]]
                    semantic_pts = [semantic_nodes[node_index_1], semantic_nodes[node_index_2], semantic_nodes[node_index_3]]
                    transform = cv2.getAffineTransform(np.array(slam_pts, dtype='float32'), np.array(semantic_pts, dtype='float32'))
                    if transform is not None:  # '!=' on an ndarray would compare elementwise
                        all_transformed = cv2.warpAffine(slam_map, transform, (cols, rows))
                        area = np.array(semantic_pts, dtype='int32')
                        area = area.reshape((-1, 1, 2))
                        mask = np.zeros((rows, cols), np.uint8)
                        cv2.fillPoly(mask, [area], 255)
                        tmp = cv2.bitwise_and(all_transformed, mask)
                        output = cv2.add(tmp, output)
            cv2.imshow('Output', output)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
Code Example #10
File: image.py Project: plr123/video-analysis
def get_subimage(img, slice_x, slice_y, width=None, height=None):
    """
    extracts the subimage specified by `slice_x` and `slice_y`.
    Optionally, the image can also be resampled, by specifying a different
    number of pixels in either direction using the last two arguments
    """
    p1_x, p2_x = slice_x[:2]
    p1_y, p2_y = slice_y[:2]

    if width is None:
        width = p2_x - p1_x
    
    if height is None:
        height = (p2_y - p1_y) * width / (p2_x - p1_x)
    
    # get corresponding points between the two images
    pts1 = np.array(((p1_x, p1_y), (p1_x, p2_y), (p2_x, p1_y)), np.float32)
    pts2 = np.array(((0, 0), (height, 0), (0, width)), np.float32)
    
    # determine and apply the affine transformation
    matrix = cv2.getAffineTransform(pts1, pts2)
    res = cv2.warpAffine(img, matrix, (int(round(height)), int(round(width))))

    # return the profile
    return res
Code Example #11
File: dlib.py Project: davidfischer-ch/pytoolbox
    def align(self, image, box, dimension=96, landmark_indices=None, landmarks=None):
        """
        Transform and align a face in an image.

        :param image: RGB image to process. Shape: (height, width, 3)
        :type image: numpy.ndarray
        :param box: Bounding box around the face to align.
        :type box: dlib.rectangle
        :param dimension: The edge length in pixels of the square the image is resized to.
        :type dimension: int
        :param landmark_indices: The indices to transform to.
        :type landmark_indices: list of ints
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `box` if not provided.
        :type landmarks: list of (x,y) tuples
        :return: The aligned RGB image. Shape: (dimension, dimension, 3)
        :rtype: numpy.ndarray
        """
        if landmark_indices is None:
            landmark_indices = self.OUTER_EYES_AND_NOSE
        if landmarks is None:
            landmarks = self.find_landmarks(image, box)

        landmarks = np.float32(landmarks)
        landmark_indices = np.array(landmark_indices)

        H = cv2.getAffineTransform(
            landmarks[landmark_indices],
            dimension * MINMAX_TEMPLATE[landmark_indices])

        return cv2.warpAffine(image, H, (dimension, dimension))
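MINMAX_TEMPLATE (from OpenFace's alignment code) appears to hold landmark coordinates normalized to [0, 1], so multiplying by `dimension` turns them into pixel destinations. A hedged sketch of that idea with made-up template and landmark values:

import cv2
import numpy as np

template = np.float32([[0.2, 0.35], [0.8, 0.35], [0.5, 0.65]])  # outer eyes + nose, normalized
detected = np.float32([[130, 120], [210, 118], [172, 160]])     # detected pixel landmarks
H = cv2.getAffineTransform(detected, 96 * template)             # dimension = 96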
Code Example #12
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.

     Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    """
    if random_state is None:
        random_state = np.random.RandomState(None)

    shape = image.shape
    shape_size = shape[:2]
    
    # Random affine
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])
    pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)

    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
    dz = np.zeros_like(dx)

    x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))

    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)
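A hedged usage sketch; the gist this snippet is based on takes gaussian_filter and map_coordinates from scipy.ndimage, and the parameter values below are assumptions:

import cv2
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

img = np.random.rand(64, 64, 3).astype(np.float32)
deformed = elastic_transform(img, alpha=991, sigma=8, alpha_affine=8)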
Code Example #13
File: gvutils.py Project: gauresh10/Opencv_basic_lib
    def affTransform(self,img,operate="",*arg):
        rows,cols=img.shape[:2]
        points=[]
        for ar in arg:
            points.append(ar)
            print(ar)
        sr1,sr2,sr3=(0,0),(0,0),(0,0)
        ds1,ds2,ds3=(0,0),(0,0),(0,0)
        if(len(operate)!=0 ):
            if(operate=="H_FLIP"):
                sr1,sr2,sr3=(0,0),(cols-1,0),(0,rows-1)
                ds1,ds2,ds3=(cols-1,0),(0,0),(cols-1,rows-1)
            elif(operate=="V_FLIP"):
                sr1,sr2,sr3=(0,0),(cols-1,0),(0,rows-1)
                ds1,ds2,ds3=(0,rows-1),(cols-1,rows-1),(0,0)
        else:
            if(len(arg)==6):
                sr1,sr2,sr3=points[0],points[1],points[2]
                ds1,ds2,ds3=points[3],points[4],points[5]

        src_points = np.float32([sr1, sr2, sr3])
        dst_points = np.float32([ds1,ds2,ds3])
        affine_matrix = cv2.getAffineTransform(src_points, dst_points)
        img_output = cv2.warpAffine(img, affine_matrix, (cols,rows))
        return img_output
Code Example #14
def transform_image(image,ang_range,shear_range,trans_range):

    # Rotation
    ang_rot = np.random.uniform(0, ang_range)-ang_range/2  # a lone positional arg to np.random.uniform sets low, not high
    rows,cols,ch = image.shape    
    Rot_M = cv2.getRotationMatrix2D((cols/2,rows/2),ang_rot,1)

    # Translation
    tr_x = trans_range*np.random.uniform()-trans_range/2
    tr_y = trans_range*np.random.uniform()-trans_range/2
    Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])

    # Shear
    pts1 = np.float32([[5,5],[20,5],[5,20]])

    pt1 = 5+shear_range*np.random.uniform()-shear_range/2
    pt2 = 20+shear_range*np.random.uniform()-shear_range/2

    pts2 = np.float32([[pt1,5],[pt2,pt1],[5,pt2]])

    shear_M = cv2.getAffineTransform(pts1,pts2)
        
    image = cv2.warpAffine(image,Rot_M,(cols,rows))
    image = cv2.warpAffine(image,Trans_M,(cols,rows))
    image = cv2.warpAffine(image,shear_M,(cols,rows))
    
    return image
Code Example #15
def get_warped(img):
    
    rows,cols,_ = img.shape

    # random scaling coefficients
    rndx = np.random.rand(3) - 0.5
    rndx *= cols * 0.06   # this coefficient determines the degree of warping
    rndy = np.random.rand(3) - 0.5
    rndy *= rows * 0.06

    # 3 starting points for transform, 1/4 way from edges
    x1 = cols/4
    x2 = 3*cols/4
    y1 = rows/4
    y2 = 3*rows/4

    pts1 = np.float32([[y1,x1],
                       [y2,x1],
                       [y1,x2]])
    pts2 = np.float32([[y1+rndy[0],x1+rndx[0]],
                       [y2+rndy[1],x1+rndx[1]],
                       [y1+rndy[2],x2+rndx[2]]])

    M = cv2.getAffineTransform(pts1,pts2)

    dst = cv2.warpAffine(img,M,(cols,rows))
    
    dst = dst[:,:,np.newaxis]
    
    return dst
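A hedged usage sketch; judging by the trailing np.newaxis, a single-channel (rows, cols, 1) input is assumed, since cv2.warpAffine drops the channel axis for such images:

import cv2
import numpy as np

img = np.random.randint(0, 255, (32, 32, 1), dtype=np.uint8)
warped = get_warped(img)  # shape (32, 32, 1)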
Code Example #16
File: alignment.py Project: fonfonx/RSC_python
def warp_image(img, triangulation, base_points, coord):
    """
    Perform the mesh warping phase

    triangulation is the Delaunay triangulation of the base points
    base_points are the coordinates of the landmark points of the reference image

    code inspired by http://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
    """
    all_points, coordinates = preprocess_image_before_triangulation(img)
    img_out = 255 * np.ones(img.shape, dtype=img.dtype)
    for t in triangulation:
        # triangles to map one another
        src_tri = np.array([[all_points[x][0], all_points[x][1]] for x in t]).astype(np.float32)
        dest_tri = np.array([[base_points[x][0], base_points[x][1]] for x in t]).astype(np.float32)
        # bounding boxes
        src_rect = cv2.boundingRect(np.array([src_tri]))
        dest_rect = cv2.boundingRect(np.array([dest_tri]))

        # crop images
        src_crop_tri = np.zeros((3, 2), dtype=np.float32)
        dest_crop_tri = np.zeros((3, 2))
        for k in range(0, 3):
            for dim in range(0, 2):
                src_crop_tri[k][dim] = src_tri[k][dim] - src_rect[dim]
                dest_crop_tri[k][dim] = dest_tri[k][dim] - dest_rect[dim]

        src_crop_img = img[src_rect[1]:src_rect[1] + src_rect[3], src_rect[0]:src_rect[0] + src_rect[2]]

        # affine transformation estimation
        mat = cv2.getAffineTransform(
            np.float32(src_crop_tri),
            np.float32(dest_crop_tri)
        )
        dest_crop_img = cv2.warpAffine(
            src_crop_img,
            mat,
            (dest_rect[2], dest_rect[3]),
            None,
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REFLECT_101
        )

        # Use a mask to keep only the triangle pixels
        # Get mask by filling triangle
        mask = np.zeros((dest_rect[3], dest_rect[2], 3), dtype=np.float32)
        cv2.fillConvexPoly(mask, np.int32(dest_crop_tri), (1.0, 1.0, 1.0), 16, 0)

        # Apply mask to cropped region
        dest_crop_img = dest_crop_img * mask

        # Copy triangular region of the rectangular patch to the output image
        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] * (
                (1.0, 1.0, 1.0) - mask)

        img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] = \
            img_out[dest_rect[1]:dest_rect[1] + dest_rect[3], dest_rect[0]:dest_rect[0] + dest_rect[2]] + dest_crop_img

    return img_out[coord[2]:coord[3], coord[0]:coord[1]]
Code Example #17
def extract_time_window(contour_list, hierarchy_list, original_image, \
    max_error):
    time_handles = get_time_handles(contour_list, hierarchy_list, max_error)

    if time_handles is None:
        return((1, None, 'No time handle detected'))

    time_handle_centres = [get_centre_of_contour(contour_list[handle[2]]) \
        for handle in time_handles]

    distance = abs(time_handle_centres[1][0] - time_handle_centres[0][0])
    one_unit = (distance / (81.5/2.3))
    total_vertical_distance = int(numpy.ceil(17.0 * one_unit))
    total_horizontal_distance = int(numpy.ceil((11.0+(81.5/2.3)) * one_unit))

    short_horizontal_distance = 5.5 * one_unit
    long_vertical_distance = 12.5 * one_unit
    long_horizontal_distance = (5.5+(81.5/2.3)) * one_unit

    model_points = numpy.array([ [short_horizontal_distance, \
        short_horizontal_distance], [long_horizontal_distance, \
        short_horizontal_distance], [(short_horizontal_distance + \
        long_horizontal_distance) / 2, long_vertical_distance] ], \
        dtype='float32')
    time_handle_points = numpy.array(time_handle_centres, dtype='float32')

    M = cv2.getAffineTransform(time_handle_points, model_points)

    rotated_cropped_img = cv2.warpAffine( numpy.copy(original_image), M, \
        (total_horizontal_distance, total_vertical_distance) )
    return((0, rotated_cropped_img, 'Success'))
Code Example #18
    def _process(self):
        w = self.display.widget
        img = w.image
        out = []
        v = self.pRef.value()

        if v == 'Reference points':
            r0, r1 = self._refPn
            pts0 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r0.handles]) + r0.pos()
            pts1 = np.array([(h['pos'].y(), h['pos'].x())
                             for h in r1.handles]) + r1.pos()
            #TODO: embed in PerspectiveCorrection
            M = cv2.getAffineTransform(
                pts0.astype(np.float32),
                pts1.astype(np.float32))
            for n, i in enumerate(img):
                out.append(
                    # TODO: allow different image shapes
                    cv2.warpAffine(i, M, w.image.shape[1:3],
                                   borderValue=0))
                print(out[-1].shape)
        else:
            r = v == 'Reference image'
            e = self.pExecOn.value()
            for n, i in enumerate(img):
                if (e == 'all images'
                    or (e == 'current image' and n == w.currentIndex)
                        or (e == 'last image' and n == len(img) - 1)):

                    if not (r and n == self._refImg_from_own_display):
                        out.append(self.pc.correct(i))
        return out
Code Example #19
def extract_algae_window(contour_list, hierarchy_list, original_image, \
    max_error):
    corners = get_corner_handles(contour_list, hierarchy_list, max_error)
    if corners is None:
        return((1, None, 'No corners detected'))
    corners = sort_corner_handles(corners, contour_list)
    corner_centres = [get_centre_of_contour(contour_list[corner[2]]) \
        for corner in corners]

    distance = max(
        ((corner_centres[0][0] - corner_centres[1][0])**2 +
         (corner_centres[0][1] - corner_centres[1][1])**2)**0.5,
        ((corner_centres[2][0] - corner_centres[1][0])**2 +
         (corner_centres[2][1] - corner_centres[1][1])**2)**0.5)

    short_distance = 5.5*(distance/32.0)
    long_distance = 37.5*(distance/32.0)
    total_distance = int(numpy.ceil(43.0*(distance/32.0)))

    model_points = numpy.array([ [short_distance,long_distance], \
        [short_distance,short_distance], [long_distance,short_distance] ], \
        dtype='float32')
    corner_points = numpy.array(corner_centres, dtype='float32')

    M = cv2.getAffineTransform(corner_points, model_points)

    rotated_cropped_img = cv2.warpAffine( numpy.copy(original_image), M, \
        (total_distance, total_distance) )
    return((0, rotated_cropped_img, 'Success'))
Code Example #20
def getNormalizedLandmarks(img, predictor, d, fronter = None, win2 = None):
    shape = predictor(img, d)
    landmarks = list(map(lambda p: (p.x, p.y), shape.parts()))
    npLandmarks = np.float32(landmarks)
    if NORM_MODE == 0:
        npLandmarkIndices = np.array(landmarkIndices)            
        H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                MINMAX_TEMPLATE[npLandmarkIndices])
        normLM = cv2.transform(np.asarray([npLandmarks]),H)[0,:,:]
        return normLM,shape
    else:
        assert fronter is not None
        thumbnail = fronter.frontalizeImage(img,d,npLandmarks)
        #thumbnail = imgEnhance(thumbnail)
        cut = thumbnail.shape[0] // 5  # integer division keeps the slice indices below ints
        thumbnail = thumbnail[cut+5:thumbnail.shape[0]-cut-5,cut+10:thumbnail.shape[1]-cut-10,:].copy()
        newShape = predictor(thumbnail, dlib.rectangle(0,0,thumbnail.shape[0],thumbnail.shape[1]))
        if win2 is not None:
            win2.clear_overlay()
            win2.set_image(thumbnail)
            win2.add_overlay(newShape)
            #dlib.hit_enter_to_continue()
        landmarks = list(map(lambda p: (float(p.x)/thumbnail.shape[0], float(p.y)/thumbnail.shape[1]), newShape.parts()))
        npLandmarks = np.float32(landmarks)
        normLM = npLandmarks
        return normLM,shape,thumbnail
Code Example #21
File: map_utils.py Project: Hukongtao/models
def get_map_to_predict(src_locs, src_x_axiss, src_y_axiss, map, map_size,
                       interpolation=cv2.INTER_LINEAR):
  fss = []
  valids = []

  center = (map_size-1.0)/2.0
  dst_theta = np.pi/2.0
  dst_loc = np.array([center, center])
  dst_x_axis = np.array([np.cos(dst_theta), np.sin(dst_theta)])
  dst_y_axis = np.array([np.cos(dst_theta+np.pi/2), np.sin(dst_theta+np.pi/2)])

  def compute_points(center, x_axis, y_axis):
    points = np.zeros((3,2),dtype=np.float32)
    points[0,:] = center
    points[1,:] = center + x_axis
    points[2,:] = center + y_axis
    return points

  dst_points = compute_points(dst_loc, dst_x_axis, dst_y_axis)
  for i in range(src_locs.shape[0]):
    src_loc = src_locs[i,:]
    src_x_axis = src_x_axiss[i,:]
    src_y_axis = src_y_axiss[i,:]
    src_points = compute_points(src_loc, src_x_axis, src_y_axis)
    M = cv2.getAffineTransform(src_points, dst_points)

    fs = cv2.warpAffine(map, M, (map_size, map_size), None, flags=interpolation,
                        borderValue=np.NaN)
    valid = np.invert(np.isnan(fs))
    valids.append(valid)
    fss.append(fs)
  return fss, valids
Code Example #22
File: register.py Project: jordanwesthoff/scantron
def register(fid1, fid2, fid3, image, blank):
   numRows, numCols, numBands, dtype = ipcv.dimensions(blank)
   
   blank1row, blank1col = ipcv.fftCorrelation2(fid1,blank)
   blank2row, blank2col = ipcv.fftCorrelation2(fid2,blank)
   blank3row, blank3col = ipcv.fftCorrelation2(fid3,blank)

   print('blank1row', blank1row)
   print('blank1col', blank1col)
   print('blank2row', blank2row)
   print('blank2col', blank2col)
   print('blank3row', blank3row)
   print('blank3col', blank3col)
  

   #blankfid1 = numpy.array([738,60])
   #blankfid2 = numpy.array([738,542])
   #blankfid3 = numpy.array([53,556])
   
   #print blankfid2.shape

   fid1row, fid1col = ipcv.fftCorrelation2(fid1,image)
   fid2row, fid2col = ipcv.fftCorrelation2(fid2,image)
   fid3row, fid3col = ipcv.fftCorrelation2(fid3,image)

   print('fid1row', fid1row)
   print('fid1col', fid1col)
   print('fid2row', fid2row)
   print('fid2col', fid2col)
   print('fid3row', fid3row)
   print('fid3col', fid3col)
   
   numRowsIm, numColsIm, numBandsIm, dataTypeIm = ipcv.dimensions(image)
   
   #Rotated 180 degrees
   if fid2row - 25 < fid1row < fid2row + 25 and fid3col - 25 < fid2col < fid3col + 25 and fid2row < numRowsIm/2 and fid3col < numColsIm/2:
      print('hello')
      image = numpy.rot90(image, k=2)
      cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
      cv2.imshow('image', image)
      fid1row, fid1col = ipcv.fftCorrelation2(fid1,image)
      fid2row, fid2col = ipcv.fftCorrelation2(fid2,image)
      fid3row, fid3col = ipcv.fftCorrelation2(fid3,image)
   
   
   blankpts = numpy.array([[blank1row,blank1col],[blank2row,blank2col],[blank3row,blank3col]]).astype(numpy.float32)
   #blankpts = numpy.array([blankfid1,blankfid2,blankfid3]).astype(numpy.float32)
   #print blankpts.shape
   fidpts = numpy.array([[fid1row,fid1col],[fid2row,fid2col],[fid3row,fid3col]]).astype(numpy.float32)
   #print fidpts.shape
   M = cv2.getAffineTransform(blankpts,fidpts)
   regIm = cv2.warpAffine(image,M,(numCols,numRows), borderMode = cv2.BORDER_TRANSPARENT)
   
   cv2.namedWindow('rot',cv2.WINDOW_AUTOSIZE)
   cv2.imshow('rot',regIm.astype(numpy.uint8))
   cv2.waitKey()

   cv2.imwrite('new0001.tif', regIm.astype(numpy.uint8))

   return regIm
Code Example #23
File: Textures.py Project: a-klek/course_work
def draw(photo_file, cloud, alfa, betta, gamma):  # function that ties everything together
    corners = getCorners(photo_file)  # grab the corners from the photo
    cloud = rotateCloud(cloud, alfa, betta, gamma)  # rotate the cloud to the angle the photo was taken from
    pr=[]
    
    for i in range(len(cloud)):  # take the projection
        p = Point2(cloud[i].x+270,cloud[i].z+300)  # offsets chosen so the projection lands roughly centered
        pr.append(p)
   
    conf_pr = getConformity(pr, corners)  # match the projection to the corners
    triangles = []
    meshes = getMeshes('cube.jpg', corners, 10, 10)  # remember the faces we need
    for i in range(len(meshes)):  # match triangles from the cloud projection with triangles
                                  # from the photo, to make copying image patches easier
        trngl=[]
        
        for j in range(3):
            for k in range(len(corners)):
                if meshes[i][j].x == corners[k].x and meshes[i][j].y == corners[k].y:
                    trngl.append(conf_pr[k])
        triangles.append(trngl)
     
    # texture mapping
    image = cv2.imread(photo_file) 
    rows,cols,ch = image.shape
    new_image = numpy.zeros(image.shape, numpy.uint8)
    new_image = cv2.bitwise_not(new_image) 
    for i in range(len(meshes)):
        # triangle points from the photo
        x1 = meshes[i][0].x
        y1 = meshes[i][0].y
        x2 = meshes[i][1].x
        y2 = meshes[i][1].y
        x3 = meshes[i][2].x
        y3 = meshes[i][2].y
        pts1 = numpy.float32([[x1,y1],[x2,y2],[x3,y3]])
        roi_corners = numpy.array([[(x1,y1), (x2,y2), (x3,y3)]], dtype=numpy.int32)
        mask = numpy.zeros(image.shape, dtype=numpy.uint8)  # mask for the photo
        # triangle points of the cloud projection
        X1 = triangles[i][0].x
        Y1 = triangles[i][0].y
        X2 = triangles[i][1].x
        Y2 = triangles[i][1].y
        X3 = triangles[i][2].x
        Y3 = triangles[i][2].y       
        pts2 = numpy.float32([[X1,Y1],[X2,Y2],[X3,Y3]])
        roi2_corners = numpy.array([[(X1,Y1), (X2,Y2), (X3,Y3)]], dtype=numpy.int32)
        mask2 = numpy.zeros(new_image.shape, dtype=numpy.uint8)  # mask for the region where the patch goes
        
        cv2.fillPoly(mask, roi_corners, (255,255,255))  # build the mask
        masked_image = cv2.bitwise_and(image, mask)  # apply the mask to the photo
        M = cv2.getAffineTransform(pts1,pts2)  # apply the affine transform
        warp_affin_img = cv2.warpAffine(masked_image,M,(cols,rows))
        
        cv2.fillPoly(mask2, roi2_corners, (255,255,255))  # build the second mask
        mask2 = cv2.bitwise_not(mask2)  # invert it (we need to fill everything outside the triangle)
        new_image = cv2.bitwise_and(new_image, mask2)  # apply the mask to the projection
        new_image = cv2.bitwise_or(new_image, warp_affin_img)  # merge the images
    cv2.imshow('result',new_image)
Code Example #24
File: rotate.py Project: gszxwd/Test
def affine(im):
    height,width=im.shape
    angle=getAngle(im)
    pts1=np.float32([[width/2,height/2],[width/2,0],[0,height/2]])
    pts2=np.float32([[width/2,height/2],[width/2+height/2/math.tan(math.radians(angle)),0],[0,height/2]])
    M=cv2.getAffineTransform(pts1,pts2)
    dst=cv2.warpAffine(im,M,(width,height))
    return dst
Code Example #25
File: faceMorph.py Project: wwwins/OpenCV-Samples
def applyAffineTransform(src, srcTri, dstTri, size):
    # Given a pair of triangles, find the affine transform.
    warpMat = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))

    # Apply the Affine Transform just found to the src image
    dst = cv2.warpAffine(src, warpMat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)

    return dst
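A hedged usage sketch for applyAffineTransform; the image and triangles are illustrative assumptions:

import cv2
import numpy as np

src = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
src_tri = [(10, 10), (80, 20), (30, 90)]
dst_tri = [(0, 0), (70, 10), (20, 80)]
patch = applyAffineTransform(src, src_tri, dst_tri, size=(100, 100))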
Code Example #26
File: main.py Project: NeedFR/facemorpher
def warpImage(orig, features, diang, src):
	image = cv2.imread(src)
	masked_image = np.zeros(image.shape, dtype=np.uint8)
	for t in diang:
		mask = np.zeros(image.shape, dtype=np.uint8)
		cv2.fillPoly(mask, np.array([[features[t[0]], features[t[1]], features[t[2]]]], dtype=np.int32), (255, 255, 255))
		src_tri = np.float32([orig[t[0]], orig[t[1]], orig[t[2]]])
		dst_tri = np.float32([features[t[0]], features[t[1]], features[t[2]]])
		M = cv2.getAffineTransform(src_tri, dst_tri)
		warped = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
		masked_image = cv2.bitwise_or(masked_image, cv2.bitwise_and(warped, mask))
	return masked_image
Code Example #27
def render_image(q, image, dims, dst=None):
    # take query bounding box (world coordinates)
    qbb = q['bbox']
    qbb_wpts = [[qbb[0], qbb[2]], [qbb[1], qbb[2]], [qbb[1], qbb[3]]]
    #im_to_q_w = cv2.getAffineTransform(
    #    numpy.array(ibb_wpts, dtype='f4'),
    #    numpy.array(qbb_wpts, dtype='f4'))
    #print("im_to_q_w {}".format(im_to_q_w))
    # affine for world to tile
    tpts = [[0, 0], [dims[0], 0], [dims[0], dims[1]]]
    w_to_t = cv2.getAffineTransform(
        numpy.array(qbb_wpts, dtype='f4'),
        numpy.array(tpts, dtype='f4'))
    #print("w_to_t {}".format(w_to_t))
    # generate affine (2) to lay image (image) onto (world)
    #im = open_tif(image['url']['0'])[::4, ::4]
    im = open_image(image['url']['0'])
    imshape = im.shape
    ipts = [[0, 0], [imshape[1], 0], [imshape[1], imshape[0]]]
    # generate affine (1) to lay image bounding box (world) onto query
    ibb = image['bbox']
    ibb_wpts = [
        [ibb['left'], ibb['north']], [ibb['right'], ibb['north']],
        [ibb['right'], ibb['south']]]
    im_to_w = cv2.getAffineTransform(
        numpy.array(ipts, dtype='f4'),
        numpy.array(ibb_wpts, dtype='f4'))
    #print("im_to_w {}".format(im_to_w))
    # combine affines im_to_w -> im_to_q_w -> q_to_t
    im_to_t = combine_affines(w_to_t, im_to_w)
    #print("im_to_t {}".format(im_to_t))
    md = 1. / max(abs(im_to_t[0, 0]), abs(im_to_t[1, 1]))
    if md > 1:
        s = min(int(md), im.shape[0] / 4, im.shape[1] / 4)
        im_to_t[0, 0] = im_to_t[0, 0] * s
        im_to_t[1, 1] = im_to_t[1, 1] * s
        im = im[::s, ::s]
    # convert image
    # calculate original image bbox to world coordinates
    # convert
    #if dst is None:
    #    return cv2.warpAffine(im.astype('f8'), im_to_t, dims)
    if dst is None:
        return cv2.warpAffine(im, im_to_t, dims)
    return cv2.warpAffine(
        im, im_to_t, dims, dst=dst, borderMode=cv2.BORDER_TRANSPARENT)
Code Example #28
def imgChangeUsingWarpAffine(img, topLeft1, bottomRight1, topLeft2, bottomRight2):
    rows, cols, ch = img.shape

    pts1 = np.float32([[topLeft1[0], topLeft1[1]], [bottomRight1[0], bottomRight1[1]], [bottomRight1[0], topLeft1[1]]])
    pts2 = np.float32([[topLeft2[0], topLeft2[1]], [bottomRight2[0], bottomRight2[1]], [bottomRight2[0], topLeft2[1]]])

    M = cv2.getAffineTransform(pts1, pts2)
    img = cv2.warpAffine(img, M, (cols, rows))
    return img
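A hedged usage sketch; the corner coordinates are assumptions. The third control point pairs each box's top-right corner, so the transform maps one axis-aligned rectangle onto the other:

import cv2
import numpy as np

img = np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)
out = imgChangeUsingWarpAffine(img, topLeft1=(40, 30), bottomRight1=(280, 200),
                               topLeft2=(20, 20), bottomRight2=(300, 220))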
Code Example #29
File: getnumber.py Project: haojijun/meter-viewer
def getSingleNumber( img, cont ):
    """
    single number ROI size: 60x100
    """
    #step 1. get single number roi
    pos1 = np.float32( [ [cont[0],0],
                         [cont[0]+cont[2],0],
                         [cont[0]+cont[2],99] ] )
    pos2 = np.float32( [ [0,0],
                         [59,0],
                         [59,99] ] )
    M = cv2.getAffineTransform( pos1, pos2 )
    roi_sn = cv2.warpAffine( img, M, (60,100),
                             borderValue=255 )  # cv2.cv.ScalarAll(255) is OpenCV 2-only; a plain scalar works

    #step 2. get 7 segments of single number
    seg7 = 7*[0]
    if sum( roi_sn[25][0:20] ) > 0: #f,5
        seg7[5] = 1   
    if sum( roi_sn[25][40:60] ) > 0: #b,1
        seg7[1] = 1   
    if sum( roi_sn[75][0:20] ) > 0: #e,4
        seg7[4] = 1   
    if sum( roi_sn[75][40:60] ) > 0: #c,2
        seg7[2] = 1
    #adjust for "7." or "L"
    x_ad = 0
    if seg7[4]==0 and seg7[5]==0:
        x_ad -= 5
    if seg7[1]==0 and seg7[2]==0:
        x_ad += 5 
    if sum([ a[30+x_ad] for a in roi_sn[0:20] ]) > 0: #a,0
        seg7[0] = 1       
    if sum([ a[30+x_ad] for a in roi_sn[40:60] ]) > 0: #g,6
        seg7[6] = 1
    if sum([ a[30+x_ad] for a in roi_sn[80:100] ]) > 0: #d,3
        seg7[3] = 1   

    #for debug
    if DEBUG_MODE:
        cv2.imshow( "roi_sn", roi_sn ) 

    try:
        i = number_mask.index( tuple(seg7) )
        if i==10:
            return "C"
        elif i==11:
            return "A"
        elif i==12:
            return "L"
        elif i==13:
            return "-1"
        else:
            return str( i )
    except ValueError:  # tuple(seg7) not present in number_mask
        return "*"
Code Example #30
File: openface_wrapper.py Project: ngeiswei/HEAD
 def align(self, img, landmarks, net=False):
     npLandmarks = np.float32(landmarks)
     npLandmarksIndices = np.array(self.outer_eyes_and_nose)
     H = cv2.getAffineTransform(npLandmarks[npLandmarksIndices],
                                96 * MINMAX_TEMPLATE[npLandmarksIndices])
     thumbnail = cv2.warpAffine(img, H, (96, 96))
     if net:
         return self.net.forward(thumbnail).reshape(1, -1)
     return thumbnail
Code Example #31
def find_table(img, display_list):
    #mask_screen = find_screen(img, display_list)
    ## find white border
    DoB = zc.get_DoB(img, 1, 31, method='Average')
    zc.check_and_display('DoB',
                         DoB,
                         display_list,
                         resize_max=config.DISPLAY_MAX_PIXEL,
                         wait_time=config.DISPLAY_WAIT_TIME)
    mask_white = zc.color_inrange(DoB, 'HSV', V_L=10)
    mask_white = cv2.morphologyEx(mask_white,
                                  cv2.MORPH_CLOSE,
                                  zc.generate_kernel(7, 'circular'),
                                  iterations=1)
    zc.check_and_display_mask('mask_white_raw',
                              img,
                              mask_white,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## find purple table (roughly)
    mask_table = zc.color_inrange(img,
                                  'HSV',
                                  H_L=130,
                                  H_U=160,
                                  S_L=60,
                                  V_L=50,
                                  V_U=240)
    mask_table, _ = zc.get_big_blobs(mask_table, min_area=50)
    mask_table = cv2.morphologyEx(mask_table,
                                  cv2.MORPH_CLOSE,
                                  zc.generate_kernel(7, 'circular'),
                                  iterations=1)
    mask_table, _ = zc.find_largest_CC(mask_table)
    if mask_table is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find table'}
        return (rtn_msg, None)
    mask_table_convex, _ = zc.make_convex(mask_table.copy(), app_ratio=0.005)
    mask_table = np.bitwise_or(mask_table, mask_table_convex)
    mask_table_raw = mask_table.copy()
    zc.check_and_display_mask('table_purple',
                              img,
                              mask_table,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## fine tune the purple table based on white border
    #mask_white = np.bitwise_and(np.bitwise_not(mask_table), mask_white)
    #mask_white = np.bitwise_and(np.bitwise_not(zc.shrink(mask_table, 3, method = "circular")), mask_white)
    mask_white = np.bitwise_and(np.bitwise_not(mask_table), mask_white)
    if 'mask_white' in display_list:
        gray = np.float32(mask_white)
        dst = cv2.cornerHarris(gray, 10, 3, 0.04)
        dst = cv2.dilate(dst, None)
        img_white = img.copy()
        img_white[mask_white > 0, :] = [0, 255, 0]
        #img_white[dst > 2.4e7] = [0, 0, 255]
        zc.check_and_display('mask_white',
                             img_white,
                             display_list,
                             resize_max=config.DISPLAY_MAX_PIXEL,
                             wait_time=config.DISPLAY_WAIT_TIME)
    #mask_table, _ = zc.make_convex(mask_table, app_ratio = 0.005)
    for i in range(15):  # xrange is Python 2-only
        mask_table = zc.expand(mask_table, 3)
        mask_table = np.bitwise_and(np.bitwise_not(mask_white), mask_table)
        if i % 4 == 3:
            mask_table, _ = zc.make_convex(mask_table, app_ratio=0.01)
            #img_display = img.copy()
            #img_display[mask_table > 0, :] = [0, 0, 255]
            #zc.display_image('table%d-b' % i, img_display, resize_max = config.DISPLAY_MAX_PIXEL, wait_time = config.DISPLAY_WAIT_TIME)
            #mask_white = np.bitwise_and(np.bitwise_not(mask_table), mask_white)
            mask_table = np.bitwise_and(np.bitwise_not(mask_white), mask_table)
    mask_table, _ = zc.find_largest_CC(mask_table)
    if mask_table is None:
        rtn_msg = {'status': 'fail', 'message': 'Cannot find table'}
        return (rtn_msg, None)
    mask_table, hull_table = zc.make_convex(mask_table, app_ratio=0.01)
    zc.check_and_display_mask('table_purple_fixed',
                              img,
                              mask_table,
                              display_list,
                              resize_max=config.DISPLAY_MAX_PIXEL,
                              wait_time=config.DISPLAY_WAIT_TIME)

    ## check if table is big enough
    table_area = cv2.contourArea(hull_table)
    table_area_percentage = float(table_area) / img.shape[0] / img.shape[1]
    if table_area_percentage < 0.06:
        rtn_msg = {
            'status': 'fail',
            'message': "Detected table too small: %f" % table_area_percentage
        }
        return (rtn_msg, None)

    ## find top line of table
    hull_table = np.array(zc.sort_pts(hull_table[:, 0, :], order_first='y'))
    ul = hull_table[0]
    ur = hull_table[1]
    if ul[0] > ur[0]:
        t = ul
        ul = ur
        ur = t
    i = 2
    # the top two points in the hull are probably on the top line, but may not be the corners
    while i < hull_table.shape[0] and hull_table[i, 1] - hull_table[0, 1] < 80:
        pt_tmp = hull_table[i]
        if pt_tmp[0] < ul[0] or pt_tmp[0] > ur[0]:
            # computing the area of the part of triangle that lies inside the table
            triangle = np.vstack([pt_tmp, ul, ur]).astype(np.int32)
            mask_triangle = np.zeros_like(mask_table)
            cv2.drawContours(mask_triangle, [triangle], 0, 255, -1)
            pts = mask_table_raw[mask_triangle.astype(bool)]
            if np.sum(pts == 255) > 10:
                break
            if pt_tmp[0] < ul[0]:
                ul = pt_tmp
            else:
                ur = pt_tmp
            i += 1
        else:
            break
    ul = [int(x) for x in ul]
    ur = [int(x) for x in ur]

    ## sanity checks about table top line detection
    if zc.euc_dist(ul, ur)**2 * 2.5 < table_area:
        rtn_msg = {'status': 'fail', 'message': "Table top line too short"}
        return (rtn_msg, None)
    if abs(zc.line_angle(ul, ur)) > 0.4:
        rtn_msg = {
            'status': 'fail',
            'message': "Table top line tilted too much"
        }
        return (rtn_msg, None)
    # check if two table sides form a reasonable angle
    mask_table_bottom = mask_table.copy()
    mask_table_bottom[:-30] = 0
    p_left_most = zc.get_edge_point(mask_table_bottom, (-1, 0))
    p_right_most = zc.get_edge_point(mask_table_bottom, (1, 0))
    if p_left_most is None or p_right_most is None:
        rtn_msg = {
            'status': 'fail',
            'message': "Table doesn't occupy bottom part of image"
        }
        return (rtn_msg, None)
    left_side_angle = zc.line_angle(ul, p_left_most)
    right_side_angle = zc.line_angle(ur, p_right_most)
    angle_diff = zc.angle_dist(left_side_angle,
                               right_side_angle,
                               angle_range=math.pi * 2)
    if abs(angle_diff) > 1.8:
        rtn_msg = {
            'status': 'fail',
            'message': "Angle between two side edge not right"
        }
        return (rtn_msg, None)

    if 'table' in display_list:
        img_table = img.copy()
        img_table[mask_table.astype(bool), :] = [255, 0, 255]
        cv2.line(img_table, tuple(ul), tuple(ur), [0, 255, 0], 3)
        zc.check_and_display('table',
                             img_table,
                             display_list,
                             resize_max=config.DISPLAY_MAX_PIXEL,
                             wait_time=config.DISPLAY_WAIT_TIME)

    ## rotate to make opponent upright, use table edge as reference
    pts1 = np.float32(
        [ul, ur, [ul[0] + (ur[1] - ul[1]), ul[1] - (ur[0] - ul[0])]])
    pts2 = np.float32([[0, config.O_IMG_HEIGHT],
                       [config.O_IMG_WIDTH, config.O_IMG_HEIGHT], [0, 0]])
    M = cv2.getAffineTransform(pts1, pts2)
    img[np.bitwise_not(zc.get_mask(img, rtn_type="bool", th=3)), :] = [3, 3, 3]
    img_rotated = cv2.warpAffine(img, M,
                                 (config.O_IMG_WIDTH, config.O_IMG_HEIGHT))

    ## sanity checks about rotated opponent image
    bool_img_rotated_valid = zc.get_mask(img_rotated, rtn_type="bool")
    if float(bool_img_rotated_valid.sum()
             ) / config.O_IMG_WIDTH / config.O_IMG_HEIGHT < 0.6:
        rtn_msg = {
            'status': 'fail',
            'message': "Valid area too small after rotation"
        }
        return (rtn_msg, None)

    rtn_msg = {'status': 'success'}
    return (rtn_msg, (img_rotated, mask_table, M))
Code Example #32
def AffineTransform(dst_img, img, src_point, dst_point):
    rows, cols, ch = dst_img.shape
    M = cv2.getAffineTransform(src_point, dst_point)
    dst = cv2.warpAffine(img, M, (cols, rows))
    return dst
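A hedged usage sketch; note that dst_img is used only for its shape, which sets the output size (all values below are assumptions):

import cv2
import numpy as np

img = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
dst_img = np.zeros((200, 200, 3), dtype=np.uint8)
src_point = np.float32([[0, 0], [99, 0], [0, 99]])
dst_point = np.float32([[10, 10], [190, 20], [20, 190]])
out = AffineTransform(dst_img, img, src_point, dst_point)  # shape (200, 200, 3)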
Code Example #33
def find_rightcounter(car_pic, color):
    if type(car_pic) == type(""):
        # if the input is a string, read the image from the path it points to
        img = imreadex(car_pic)
    else:
        # otherwise, treat the input as an image already
        img = car_pic
    oldimg = img
    original = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    pic_hight, pic_width = img.shape[:2]

    # if the input image is too large, resize it
    #if pic_width > 500:
    #	resize_rate = 500 / pic_width
    #	img = cv2.resize(img, (500, int(pic_hight*resize_rate)), interpolation=cv2.INTER_AREA)
    # Gaussian blur to remove noise
    blur = 3
    if blur > 0:
        img = cv2.GaussianBlur(img, (blur, blur), 0)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # find the image edges
    #ret, img_thresh = cv2.threshold(img_opening, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Otsu threshold
    #img_edge = cv2.Canny(img_thresh, 50, 200)  # Canny edge extraction

    ret, img_edge = cv2.threshold(img, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)  # Otsu threshold
    #img_edge = cv2.bitwise_not(img_edge)
    cv2.imshow("1", img_edge)

    # consolidate the edges: opening and closing merge them into connected regions
    kernel = np.ones((20, 20), np.uint8)
    img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
    img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)

    # find the rectangular regions formed by the edges; there may be many, and the plate is in one of them
    # contours stores the points of each outline; cv2.CHAIN_APPROX_SIMPLE keeps only the corner points
    image, contours, hierarchy = cv2.findContours(
        img_edge2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)  # detect object contours
    tmp1 = image
    tmp1 = cv2.cvtColor(tmp1, cv2.COLOR_GRAY2BGR)  # grayscale to color

    # -1 means draw every contour
    # (0, 255, 0) is the pen color (green)
    # 2 is the pen thickness
    cv2.drawContours(tmp1, contours, -1, (0, 255, 0), 2)  # draw the segmented contours

    # keep only contours whose area exceeds Min_Area
    contours = [cnt for cnt in contours if cv2.contourArea(cnt) > 500]
    #print('len(contours)', len(contours))
    tmp2 = image
    tmp2 = cv2.cvtColor(tmp2, cv2.COLOR_GRAY2BGR)  # grayscale to color
    cv2.drawContours(tmp2, contours, -1, (0, 255, 0), 2)

    cv2.imshow("contours", tmp2)
    ########## compute the rectangular regions where the plate may appear (begin) ##########
    car_contours = []
    tmp3 = image
    tmp3 = cv2.cvtColor(tmp3, cv2.COLOR_GRAY2RGB)  # grayscale to color
    tmp4 = copy.deepcopy(oldimg)  # import copy
    tmp4 = cv2.cvtColor(tmp4, cv2.COLOR_BGR2RGB)
    for cnt in contours:
        # cv2.minAreaRect generates the minimum-area bounding rectangle of each contour
        # the input is the point set describing the contour
        # rect holds the rectangle's center coordinates, width/height and rotation angle
        # (width and height here are not defined by which side is longer)
        rect = cv2.minAreaRect(cnt)
        area_width, area_height = rect[1]
        # adjust so that width is always the larger dimension
        if area_width < area_height:
            area_width, area_height = area_height, area_width
        wh_ratio = area_width / area_height
        #print(wh_ratio)

        # require an aspect ratio between 2 and 5.5, the range for license plates; discard other rectangles
        if wh_ratio > 2 and wh_ratio < 5.5:
            car_contours.append(rect)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(tmp3, [box], -1, (0, 255, 0), 2)
            cv2.drawContours(tmp4, [box], -1, (0, 255, 0), 2)

    ########## straighten the tilted rectangles (begin) ##########
    # print("precise localization")
    print(rect)
    card_imgs = []
    # the rectangles may be tilted and must be rectified so that color-based localization can be used
    for rect in car_contours:
        # adjust the angle so the box sits higher on the left than on the right;
        # treat all angles in (0, 1] degrees as 1 degree and all in [-1, 0) as -1 degree.
        # this step is necessary: without it the affine warp below can return an all-gray image,
        # because when the angle is near 0, two of the four corner points share almost the same
        # coordinate, so topmost/bottommost/leftmost/rightmost lose their discriminating power,
        # the control-point correspondences can be mismatched, and the affine warp fails
        if rect[2] > -1:
            angle = -1
        else:
            angle = rect[2]

        # enlarge the box so the plate border is not cut off
        rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle)
        box = cv2.boxPoints(rect)

        # bottom_point: the lowest of the 4 box corners
        # right_point: the rightmost of the 4 box corners
        # left_point: the leftmost of the 4 box corners
        # top_point: the topmost of the 4 box corners
        bottom_point = right_point = [0, 0]
        left_point = top_point = [pic_width, pic_hight]
        for point in box:
            if left_point[0] > point[0]:
                left_point = point
            if top_point[1] > point[1]:
                top_point = point
            if bottom_point[1] < point[1]:
                bottom_point = point
            if right_point[0] < point[0]:
                right_point = point

        # note: cv2.boxPoints reports angles in [-90, 0], so the "bottom" side is not the longer
        # one: it is the first side reached when rotating counter-clockwise from the x-axis,
        # and the other side is the "height"
        # the affine rectification below only handles small angles; large angles distort badly
        # the rectangle is not rotated before the warp, so recognition works only when the plate
        # is roughly horizontal; a large in-plane rotation of the plate makes it fail
        if left_point[1] <= right_point[1]:  # positive angle
            new_right_point = [right_point[0], bottom_point[1]]
            pts2 = np.float32([left_point, bottom_point, new_right_point])
            pts1 = np.float32([left_point, bottom_point, right_point])
            # affine warp using 3 control points
            M = cv2.getAffineTransform(pts1, pts2)
            dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))

            point_limit(new_right_point)
            point_limit(bottom_point)
            point_limit(left_point)
            card_img = dst[int(left_point[1]):int(bottom_point[1]),
                           int(left_point[0]):int(new_right_point[0])]
            card_imgs.append(card_img)
        elif left_point[1] > right_point[1]:  # negative angle
            new_left_point = [left_point[0], bottom_point[1]]
            pts2 = np.float32([new_left_point, bottom_point, right_point])
            pts1 = np.float32([left_point, bottom_point, right_point])
            # affine warp
            M = cv2.getAffineTransform(pts1, pts2)
            dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))

            point_limit(right_point)
            point_limit(bottom_point)
            point_limit(new_left_point)
            card_img = dst[int(right_point[1]):int(bottom_point[1]),
                           int(new_left_point[0]):int(right_point[0])]
            card_imgs.append(card_img)
        ########## straighten the tilted rectangles (end) ##########

        ########## re-localize by plate color to shrink non-plate borders (begin) ##########
        colors = []
        # enumerate pairs each element of an iterable with its index, giving (index, value)
        for card_index, card_img in enumerate(card_imgs):
            if card_img is None:
                continue
            card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
            # the conversion can fail if the rectification above went wrong

            row_num, col_num = card_img_hsv.shape[:2]
            xl, xr, yb, yt, limit1, limit2 = accurate_place(
                card_img_hsv, color)
            if yt == yb and xl == xr:
                continue
            need_accurate = False
            if yt >= yb:
                yt = 0
                yb = row_num
                need_accurate = True
            if xl >= xr:
                xl = 0
                xr = col_num
                need_accurate = True
            card_imgs[card_index] = card_img[
                yt:yb,
                xl:xr] if color != "g" or yt < (yb - yt) // 4 else card_img[
                    yt - (yb - yt) // 4:yb, xl:xr]
            if need_accurate:  # x or y may not have shrunk; try once more
                card_img = card_imgs[card_index]
                card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
                xl, xr, yb, yt, limit1, limit2 = accurate_place(
                    card_img_hsv, color)
                if yt == yb and xl == xr:
                    continue
                if yt >= yb:
                    yt = 0
                    yb = row_num
                if xl >= xr:
                    xl = 0
                    xr = col_num
            card_imgs[card_index] = card_img[
                yt:yb,
                xl:xr] if color != "g" or yt < (yb - yt) // 4 else card_img[
                    yt - (yb - yt) // 4:yb, xl:xr]
            cv2.imshow("ok" + str(card_index), card_imgs[card_index])
Code Example #34
                    s2=src_pts_new_y[j]-src_pts_new_y[i]  # compute x2-x1 and y2-y1 up front to avoid recomputing them later
                    for k in range(j+1,count_pts):
                        s4=abs(s1*(src_pts_new_y[k]-src_pts_new_y[i])-(src_pts_new_x[k]-src_pts_new_x[i])*s2)/2
                        if s<s4:
                            s=s4
                            idx_0 = i
                            idx_1 = j
                            idx_2 = k


            src_pts_new = np.float32([[src_pts_new_x[idx_0],src_pts_new_y[idx_0]], [src_pts_new_x[idx_1],src_pts_new_y[idx_1]], [src_pts_new_x[idx_2],src_pts_new_y[idx_2]]])
            dst_pts_new = np.float32([[dst_pts_new_x[idx_0],dst_pts_new_y[idx_0]], [dst_pts_new_x[idx_1],dst_pts_new_y[idx_1]], [dst_pts_new_x[idx_2],dst_pts_new_y[idx_2]]])

            print("src_pts_new: ", src_pts_new)
            print("dst_pts_new: ", dst_pts_new)
            M_new = cv2.getAffineTransform(dst_pts_new, src_pts_new)

            affined_image = cv2.warpAffine(detect_image, M_new, (detect_image.shape[1], detect_image.shape[0]))
            affined_image_lb = cv2.warpAffine(label_image, M_new, (detect_image.shape[1], detect_image.shape[0]))

            affined_image_bb = cv2.rectangle(affined_image, (203, 211), (1184, 432), (255,255,255), 2)
            affined_image_lb_bb = cv2.rectangle(affined_image_lb, (203, 211), (1184, 432), (255,255,255), 2)

            affined_image_crop = affined_image_bb[211:432, 203:1184]
            affined_image_lb_crop = affined_image_lb_bb[211:432, 203:1184]

            cv2.imwrite(crop_defect_path + filename+"croppic.png", affined_image_crop)
            cv2.imwrite(crop_label_path + filename+"croplb.png", affined_image_lb_crop)  

        else:
            print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
Code example #35
0
File: transforms.py Project: olatarkowska/cellpose
def random_rotate_and_resize(X, Y=None, scale_range=1., xy = (224,224), 
                             do_flip=True, rescale=None, unet=False):
    """ augmentation by random rotation and resizing

        X and Y are lists or arrays of length nimg, with dims channels x Ly x Lx (channels optional)

        Parameters
        ----------
        X: LIST of ND-arrays, float
            list of image arrays of size [nchan x Ly x Lx] or [Ly x Lx]

        Y: LIST of ND-arrays, float (optional, default None)
            list of image labels of size [nlabels x Ly x Lx] or [Ly x Lx]. The 1st channel
            of Y is always nearest-neighbor interpolated (assumed to be masks or 0-1 representation).
            If Y.shape[0]==3 and not unet, then the labels are assumed to be [cell probability, Y flow, X flow]. 
            If unet, second channel is dist_to_bound.

        scale_range: float (optional, default 1.0)
            Range of resizing of images for augmentation. Images are resized by
            (1-scale_range/2) + scale_range * np.random.rand()

        xy: tuple, int (optional, default (224,224))
            size of transformed images to return

        do_flip: bool (optional, default True)
            whether or not to flip images horizontally

        rescale: array, float (optional, default None)
            how much to resize images by before performing augmentations

        unet: bool (optional, default False)

        Returns
        -------
        imgi: ND-array, float
            transformed images in array [nimg x nchan x xy[0] x xy[1]]

        lbl: ND-array, float
            transformed labels in array [nimg x nchan x xy[0] x xy[1]]

        scale: array, float
            amount each image was resized by

    """
    scale_range = max(0, min(2, float(scale_range)))
    nimg = len(X)
    if X[0].ndim>2:
        nchan = X[0].shape[0]
    else:
        nchan = 1
    imgi  = np.zeros((nimg, nchan, xy[0], xy[1]), np.float32)

    lbl = []
    if Y is not None:
        if Y[0].ndim>2:
            nt = Y[0].shape[0]
        else:
            nt = 1
        lbl = np.zeros((nimg, nt, xy[0], xy[1]), np.float32)

    scale = np.zeros(nimg, np.float32)
    for n in range(nimg):
        Ly, Lx = X[n].shape[-2:]

        # generate random augmentation parameters
        flip = np.random.rand()>.5
        theta = np.random.rand() * np.pi * 2
        scale[n] = (1-scale_range/2) + scale_range * np.random.rand()
        if rescale is not None:
            scale[n] *= 1. / rescale[n]
        dxy = np.maximum(0, np.array([Lx*scale[n]-xy[1],Ly*scale[n]-xy[0]]))
        dxy = (np.random.rand(2,) - .5) * dxy

        # create affine transform
        cc = np.array([Lx/2, Ly/2])
        cc1 = cc - np.array([Lx-xy[1], Ly-xy[0]])/2 + dxy
        pts1 = np.float32([cc,cc + np.array([1,0]), cc + np.array([0,1])])
        pts2 = np.float32([cc1,
                cc1 + scale[n]*np.array([np.cos(theta), np.sin(theta)]),
                cc1 + scale[n]*np.array([np.cos(np.pi/2+theta), np.sin(np.pi/2+theta)])])
        M = cv2.getAffineTransform(pts1,pts2)

        img = X[n].copy()
        if Y is not None:
            labels = Y[n].copy()
            if labels.ndim<3:
                labels = labels[np.newaxis,:,:]

        if flip and do_flip:
            img = img[..., ::-1]
            if Y is not None:
                labels = labels[..., ::-1]
                if nt > 1 and not unet:
                    labels[2] = -labels[2]

        for k in range(nchan):
            I = cv2.warpAffine(img[k], M, (xy[1],xy[0]), flags=cv2.INTER_LINEAR)
            imgi[n,k] = I

        if Y is not None:
            for k in range(nt):
                if k==0:
                    lbl[n,k] = cv2.warpAffine(labels[k], M, (xy[1],xy[0]), flags=cv2.INTER_NEAREST)
                else:
                    lbl[n,k] = cv2.warpAffine(labels[k], M, (xy[1],xy[0]), flags=cv2.INTER_LINEAR)

            if nt > 1 and not unet:
                v1 = lbl[n,2].copy()
                v2 = lbl[n,1].copy()
                lbl[n,1] = (-v1 * np.sin(-theta) + v2*np.cos(-theta))
                lbl[n,2] = (v1 * np.cos(-theta) + v2*np.sin(-theta))

    return imgi, lbl, scale
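The pts1/pts2 construction above encodes rotation, scale, and translation as a map of a center point plus two unit axes; a minimal standalone sketch of the same trick:

import numpy as np
import cv2

theta, s = np.deg2rad(30), 0.8            # illustrative rotation and scale
cc = np.array([64.0, 64.0])               # source center
cc1 = cc + np.array([5.0, -3.0])          # target center (carries the translation)
pts1 = np.float32([cc, cc + [1, 0], cc + [0, 1]])
pts2 = np.float32([cc1,
                   cc1 + s * np.array([np.cos(theta), np.sin(theta)]),
                   cc1 + s * np.array([np.cos(np.pi / 2 + theta), np.sin(np.pi / 2 + theta)])])
M = cv2.getAffineTransform(pts1, pts2)
img = np.random.rand(128, 128).astype(np.float32)
warped = cv2.warpAffine(img, M, (128, 128), flags=cv2.INTER_LINEAR)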
Code example #36
0
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    # estimate landmarks transform from global space to local aligned space with bounds [0..1]
    mat = umeyama(
        np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]),
        landmarks_2D_new, True)[0:2]

    # get corner points in global space
    g_p = transform_points(
        np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    g_c = g_p[4]

    # calc diagonal vectors between corners in global space
    tb_diag_vec = (g_p[2] - g_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (g_p[1] - g_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    # calc modifier of diagonal vectors for scale and padding value
    padding, remove_align = FaceType_to_padding_remove_align.get(
        face_type, 0.0)
    mod = (1.0 / scale) * (npla.norm(g_p[0] - g_p[2]) *
                           (padding * np.sqrt(2.0) + 0.5))

    if face_type == FaceType.WHOLE_FACE:
        # adjust center for WHOLE_FACE, 7% below in order to cover more forehead
        vec = (g_p[0] - g_p[3]).astype(np.float32)
        vec_len = npla.norm(vec)
        vec /= vec_len

        g_c += vec * vec_len * 0.07

    # calc 3 points in global space to estimate 2d affine transform
    if not remove_align:
        l_t = np.array([
            np.round(g_c - tb_diag_vec * mod),
            np.round(g_c + bt_diag_vec * mod),
            np.round(g_c + tb_diag_vec * mod)
        ])
    else:
        # remove_align - face will be centered in the frame but not aligned
        l_t = np.array([
            np.round(g_c - tb_diag_vec * mod),
            np.round(g_c + bt_diag_vec * mod),
            np.round(g_c + tb_diag_vec * mod),
            np.round(g_c - bt_diag_vec * mod),
        ])

        # get area of face square in global space
        area = mathlib.polygon_area(l_t[:, 0], l_t[:, 1])

        # calc side of square
        side = np.float32(math.sqrt(area) / 2)

        # calc 3 points with unrotated square
        l_t = np.array([
            np.round(g_c + [-side, -side]),
            np.round(g_c + [side, -side]),
            np.round(g_c + [side, side])
        ])

    # calc affine transform from 3 global space points to 3 local space points size of 'output_size'
    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)
    return mat
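The returned matrix maps frame coordinates to the aligned chip, so extracting uses it directly and pasting back uses WARP_INVERSE_MAP. A hedged usage sketch with a stand-in matrix (the real one would come from get_transform_mat):

import cv2
import numpy as np

output_size = 256
frame = np.zeros((480, 640, 3), np.uint8)
mat = np.float32([[0.5, 0.0, -50.0],      # stand-in for get_transform_mat(...)
                  [0.0, 0.5, -30.0]])
face = cv2.warpAffine(frame, mat, (output_size, output_size), flags=cv2.INTER_CUBIC)
merged = cv2.warpAffine(face, mat, (frame.shape[1], frame.shape[0]), frame.copy(),
                        flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                        borderMode=cv2.BORDER_TRANSPARENT)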
Code example #37
0
def elastic_transform(image,
                      alpha,
                      sigma,
                      alpha_affine,
                      interpolation=cv2.INTER_LINEAR,
                      border_mode=cv2.BORDER_REFLECT_101,
                      random_state=None,
                      approximate=False):
    """Elastic deformation of images as described in [Simard2003]_ (with modifications).
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5

    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
         Convolutional Neural Networks applied to Visual Document Analysis", in
         Proc. of the International Conference on Document Analysis and
         Recognition, 2003.
    """
    if random_state is None:
        random_state = np.random.RandomState(1234)

    height, width = image.shape[:2]

    # Random affine
    center_square = np.float32((height, width)) // 2
    square_size = min((height, width)) // 3
    alpha = float(alpha)
    sigma = float(sigma)
    alpha_affine = float(alpha_affine)

    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size
    ])
    pts2 = pts1 + random_state.uniform(
        -alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
    matrix = cv2.getAffineTransform(pts1, pts2)

    image = cv2.warpAffine(image,
                           matrix, (width, height),
                           flags=interpolation,
                           borderMode=border_mode)

    if approximate:
        # Approximate computation: smooth the displacement map with a large enough kernel.
        # On large images (512+) this is roughly 2x faster.
        dx = (random_state.rand(height, width).astype(np.float32) * 2 - 1)
        cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
        dx *= alpha

        dy = (random_state.rand(height, width).astype(np.float32) * 2 - 1)
        cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
        dy *= alpha
    else:
        dx = np.float32(
            gaussian_filter(
                (random_state.rand(height, width) * 2 - 1), sigma) * alpha)
        dy = np.float32(
            gaussian_filter(
                (random_state.rand(height, width) * 2 - 1), sigma) * alpha)

    x, y = np.meshgrid(np.arange(width), np.arange(height))

    mapx = np.float32(x + dx)
    mapy = np.float32(y + dy)

    return cv2.remap(image, mapx, mapy, interpolation, borderMode=border_mode)
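A minimal usage sketch for elastic_transform above; parameter values are illustrative, and gaussian_filter (used by the exact branch) comes from scipy.ndimage:

import numpy as np
from scipy.ndimage import gaussian_filter  # required by the non-approximate branch

img = (np.random.rand(256, 256, 3) * 255).astype(np.uint8)
warped = elastic_transform(img, alpha=1000, sigma=30, alpha_affine=30,
                           random_state=np.random.RandomState(42))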
Code example #38
0
 def elastic_transform3Dv2(self,
                           image,
                           alpha,
                           sigma,
                           alpha_affine,
                           random_state=None):
     """Elastic deformation of images as described in [Simard2003]_ (with modifications).
     .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
          Convolutional Neural Networks applied to Visual Document Analysis", in
          Proc. of the International Conference on Document Analysis and
          Recognition, 2003.
      Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
      From https://www.kaggle.com/bguberfain/elastic-transform-for-data-augmentation
     """
     # the affine and the deformation are applied slice by slice but kept fixed across slices
     if random_state is None:
         random_state = np.random.RandomState(None)
     shape = image.shape  # image is concatenated: the first channel [:,:,:,0] is the image, the second channel
     # [:,:,:,1] is the mask. Both channels undergo the same transformation.
     shape_size = shape[:-1]  # z y x
     # Random affine
     shape_size_aff = shape[1:-1]  # y x
     center_square = np.float32(shape_size_aff) // 2
     square_size = min(shape_size_aff) // 3
     pts1 = np.float32([
         center_square + square_size,
         [center_square[0] + square_size, center_square[1] - square_size],
         center_square - square_size
     ])
     pts2 = pts1 + random_state.uniform(
         -alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
     M = cv2.getAffineTransform(pts1, pts2)
     new_img = np.zeros_like(image)
     for i in range(shape[0]):
         new_img[i, :, :,
                 0] = cv2.warpAffine(image[i, :, :, 0],
                                     M,
                                     shape_size_aff[::-1],
                                     borderMode=cv2.BORDER_CONSTANT,
                                     borderValue=0.)
         for j in range(1, 10):
             new_img[i, :, :,
                     j] = cv2.warpAffine(image[i, :, :, j],
                                         M,
                                         shape_size_aff[::-1],
                                         flags=cv2.INTER_NEAREST,
                                         borderMode=cv2.BORDER_TRANSPARENT,
                                         borderValue=0)
     dx = gaussian_filter(
         (random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
     dy = gaussian_filter(
         (random_state.rand(*shape[1:-1]) * 2 - 1), sigma) * alpha
     x, y = np.meshgrid(np.arange(shape_size_aff[1]),
                        np.arange(shape_size_aff[0]))
     indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
     new_img2 = np.zeros_like(image)
     for i in range(shape[0]):
         new_img2[i, :, :,
                  0] = map_coordinates(new_img[i, :, :, 0],
                                       indices,
                                       order=1,
                                       mode='constant').reshape(shape[1:-1])
         for j in range(1, 10):
             new_img2[i, :, :,
                      j] = map_coordinates(new_img[i, :, :, j],
                                           indices,
                                           order=0,
                                           mode='constant').reshape(
                                               shape[1:-1])
     return np.array(new_img2), new_img
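The per-slice pattern above (one shared affine, then one shared displacement field, order=1 for the image and order=0 for masks) condensed to a single 2-D image/mask pair (a sketch, not the original API):

import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

rs = np.random.RandomState(0)
img = rs.rand(128, 128).astype(np.float32)
mask = (img > 0.5).astype(np.float32)
alpha, sigma = 100.0, 8.0
dx = gaussian_filter(rs.rand(128, 128) * 2 - 1, sigma) * alpha
dy = gaussian_filter(rs.rand(128, 128) * 2 - 1, sigma) * alpha
x, y = np.meshgrid(np.arange(128), np.arange(128))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1))
img_w = map_coordinates(img, indices, order=1, mode='constant').reshape(128, 128)    # bilinear for the image
mask_w = map_coordinates(mask, indices, order=0, mode='constant').reshape(128, 128)  # nearest for the mask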
Code example #39
0
    def align(self,
              imgDim,
              rgbImg,
              bb=None,
              pad=None,
              ts=None,
              landmarks=None,
              landmarkIndices=INNER_EYES_AND_BOTTOM_LIP,
              opencv_det=False,
              opencv_model="../model/opencv/cascade.xml",
              only_crop=False):
        r"""align(imgDim, rgbImg, bb=None, landmarks=None, landmarkIndices=INNER_EYES_AND_BOTTOM_LIP)

        Transform and align a face in an image.

        :param imgDim: The edge length in pixels of the square the image is resized to.
        :type imgDim: int
        :param rgbImg: RGB image to process. Shape: (height, width, 3)
        :type rgbImg: numpy.ndarray
        :param bb: Bounding box around the face to align. \
                   Defaults to the largest face.
        :type bb: dlib.rectangle
        :param pad: padding bb by left, top, right, bottom
        :type pad: list
        :param landmarks: Detected landmark locations. \
                          Landmarks found on `bb` if not provided.
        :type landmarks: list of (x,y) tuples
        :param landmarkIndices: The indices to transform to.
        :type landmarkIndices: list of ints
        :return: The aligned RGB image. Shape: (imgDim, imgDim, 3)
        :rtype: numpy.ndarray
        """
        assert imgDim is not None
        assert rgbImg is not None
        assert landmarkIndices is not None

        if bb is None:
            if opencv_det:
                face_cascade = cv2.CascadeClassifier(opencv_model)
                faces = face_cascade.detectMultiScale(rgbImg,
                                                      1.1,
                                                      2,
                                                      minSize=(30, 30))
                dlib_rects = []
                for (x, y, w, h) in faces:
                    dlib_rects.append(
                        dlib.rectangle(int(x), int(y), int(x + w), int(y + h)))
                if len(faces) > 0:
                    bb = max(dlib_rects,
                             key=lambda rect: rect.width() * rect.height())
                else:
                    bb = None
            else:
                bb = self.getLargestFaceBoundingBox(rgbImg)
            if bb is None:
                return
            if pad is not None:
                left = int(max(0, bb.left() - bb.width() * float(pad[0])))
                top = int(max(0, bb.top() - bb.height() * float(pad[1])))
                right = int(
                    min(rgbImg.shape[1],
                        bb.right() + bb.width() * float(pad[2])))
                bottom = int(
                    min(rgbImg.shape[0],
                        bb.bottom() + bb.height() * float(pad[3])))
                bb = dlib.rectangle(left, top, right, bottom)

        if landmarks is None:
            landmarks = self.findLandmarks(rgbImg, bb)

        npLandmarks = np.float32(landmarks)
        npLandmarkIndices = np.array(landmarkIndices)

        dstLandmarks = imgDim * MINMAX_TEMPLATE[npLandmarkIndices]
        if ts is not None:
            # reserve more forehead area on the face
            dstLandmarks[(0, 1),
                         1] = dstLandmarks[(0, 1), 1] + imgDim * float(ts)
            dstLandmarks[2, 1] = dstLandmarks[2, 1] + imgDim * float(ts) / 2
        if not only_crop:
            H = cv2.getAffineTransform(npLandmarks[npLandmarkIndices],
                                       dstLandmarks)
            return cv2.warpAffine(rgbImg, H, (imgDim, imgDim))
        else:
            return rgbImg[top:bottom,
                          left:right]  # crop is rgbImg[y: y + h, x: x + w]
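A hedged usage sketch: this align() appears to live on an openface-style AlignDlib class, and the pad argument is specific to this fork rather than stock openface (paths and names below are assumptions):

import cv2
import openface  # assumption: the `openface` package providing AlignDlib

aligner = openface.AlignDlib("shape_predictor_68_face_landmarks.dat")
rgb = cv2.cvtColor(cv2.imread("person.jpg"), cv2.COLOR_BGR2RGB)
chip = aligner.align(96, rgb, pad=[0.25, 0.25, 0.25, 0.25])
if chip is not None:
    cv2.imwrite("chip.jpg", cv2.cvtColor(chip, cv2.COLOR_RGB2BGR))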
Code example #40
0
# Copyright 2017 BIG VISION LLC ALL RIGHTS RESERVED
# 
# This code is made available to the students of 
# the online course titled "Computer Vision for Faces" 
# by Satya Mallick for personal non-commercial use. 
#
# Sharing this code is strictly prohibited without written
# permission from Big Vision LLC. 
#
# For licensing and other inquiries, please email 
# [email protected] 
#

import cv2
import numpy as np 

# Input triangle
inp = np.float32([[50, 50], [100, 100], [200, 150]])
# Output triangle
output = np.float32([[72, 51], [142, 101], [272, 136]])
# Another output triangle
output2 = np.float32([[77, 76], [152, 151], [287, 236]])

# Get the transformation matrices
warpMat = cv2.getAffineTransform(inp, output)
warpMat2 = cv2.getAffineTransform(inp, output2)

# Display the matrices
print ("Warp Matrix 1 : \n {} \n".format(warpMat))
print ("Warp Matrix 2 : \n {} \n".format(warpMat2))
Code example #41
0
         maxArea, maxD = d.area, d
 d = maxD
 print("Detection with max area: Left: {} Top: {} Right: {} Bottom: {}".
       format(d.left(), d.top(), d.right(), d.bottom()))
 # Get the landmarks/parts for the face in box d.
 #win.clear_overlay()
 #win.set_image(img)
 shape = predictor(img, d)
 #win.add_overlay(shape)
 #print("Part 0: {}, Part 1: {} ...".format(shape.part(0),shape.part(1)))
 landmarks = list(map(lambda p: (p.x, p.y), shape.parts()))
 npLandmarks = np.float32(landmarks)
 if MODE == 0:
     npLandmarkIndices = np.array(landmarkIndices)
     H = cv2.getAffineTransform(
         npLandmarks[npLandmarkIndices],
         imgDim * MINMAX_TEMPLATE[npLandmarkIndices] + imgDim *
         (ENLARGE - 1.0) / 2)
     thumbnail = cv2.warpAffine(
         img, H, (int(imgDim * ENLARGE), int(imgDim * ENLARGE)))
     fnSurf = '_crop.jpg'
 else:
     thumbnail = fronter.frontalizeImage(img, d, npLandmarks)
     cut = int(thumbnail.shape[0] / 8)
     thumbnail = thumbnail[cut:thumbnail.shape[0] - cut,
                           cut:thumbnail.shape[1] - cut, :].copy()
     fnSurf = '_frontal.jpg'
 #cv2.imshow('normalizedFace',thumbnail)
 imPath, imName = os.path.split(f)
 if FILTER_MODE == 1:
     k = cv2.waitKey(-1)
     if k == 49:
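The fragment above keeps the detection with the largest area; the same selection can be written with max() (a sketch assuming dlib's stock frontal detector):

import dlib
import numpy as np

detector = dlib.get_frontal_face_detector()
img = np.zeros((240, 320, 3), np.uint8)  # stand-in frame
dets = detector(img, 1)
if dets:
    d = max(dets, key=lambda r: r.width() * r.height())  # largest face, as in the loop above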
Code example #42
0
def ConvertMaskedFace(predictor_func, predictor_input_shape, cfg, frame_info,
                      img_bgr_uint8, img_bgr, img_face_landmarks):
    img_size = img_bgr.shape[1], img_bgr.shape[0]

    img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
        img_bgr.shape, img_face_landmarks)

    if cfg.mode == 'original':
        if cfg.export_mask_alpha:
            img_bgr = np.concatenate([img_bgr, img_face_mask_a], -1)
        return img_bgr, img_face_mask_a

    out_img = img_bgr.copy()
    out_merging_mask = None

    output_size = predictor_input_shape[0]
    if cfg.super_resolution_mode != 0:
        output_size *= 2

    face_mat = LandmarksProcessor.get_transform_mat(img_face_landmarks,
                                                    output_size,
                                                    face_type=cfg.face_type)
    face_output_mat = LandmarksProcessor.get_transform_mat(
        img_face_landmarks,
        output_size,
        face_type=cfg.face_type,
        scale=1.0 + 0.01 * cfg.output_face_scale)

    dst_face_bgr = cv2.warpAffine(img_bgr,
                                  face_mat, (output_size, output_size),
                                  flags=cv2.INTER_CUBIC)
    dst_face_mask_a_0 = cv2.warpAffine(img_face_mask_a,
                                       face_mat, (output_size, output_size),
                                       flags=cv2.INTER_CUBIC)

    predictor_input_bgr = cv2.resize(dst_face_bgr, predictor_input_shape[0:2])

    predicted = predictor_func(predictor_input_bgr)
    if isinstance(predicted, tuple):
        # converter returns bgr, mask
        prd_face_bgr = np.clip(predicted[0], 0, 1.0)
        prd_face_mask_a_0 = np.clip(predicted[1], 0, 1.0)
        predictor_masked = True
    else:
        # converter returns bgr only; use the dst mask
        prd_face_bgr = np.clip(predicted, 0, 1.0)
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       predictor_input_shape[0:2])
        predictor_masked = False

    if cfg.super_resolution_mode:
        prd_face_bgr = cfg.superres_func(cfg.super_resolution_mode,
                                         prd_face_bgr)

        if predictor_masked:
            prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                           (output_size, output_size),
                                           interpolation=cv2.INTER_CUBIC)
        else:
            prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                           (output_size, output_size),
                                           interpolation=cv2.INTER_CUBIC)

    if cfg.mask_mode == 2:  #dst
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       (output_size, output_size),
                                       interpolation=cv2.INTER_CUBIC)
    elif cfg.mask_mode >= 3 and cfg.mask_mode <= 7:

        if cfg.mask_mode == 3 or cfg.mask_mode == 5 or cfg.mask_mode == 6:
            prd_face_fanseg_bgr = cv2.resize(prd_face_bgr,
                                             (cfg.fanseg_input_size, ) * 2)
            prd_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, prd_face_fanseg_bgr)
            FAN_prd_face_mask_a_0 = cv2.resize(prd_face_fanseg_mask,
                                               (output_size, output_size),
                                               interpolation=cv2.INTER_CUBIC)

        if cfg.mask_mode >= 4 and cfg.mask_mode <= 7:  # FAN-dst mask is needed for modes 4-7

            full_face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                img_face_landmarks,
                cfg.fanseg_input_size,
                face_type=FaceType.FULL)
            dst_face_fanseg_bgr = cv2.warpAffine(img_bgr,
                                                 full_face_fanseg_mat,
                                                 (cfg.fanseg_input_size, ) * 2,
                                                 flags=cv2.INTER_CUBIC)
            dst_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, dst_face_fanseg_bgr)

            if cfg.face_type == FaceType.FULL:
                FAN_dst_face_mask_a_0 = cv2.resize(dst_face_fanseg_mask,
                                                   (output_size, output_size),
                                                   interpolation=cv2.INTER_CUBIC)
            elif cfg.face_type == FaceType.HALF:
                half_face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                    img_face_landmarks,
                    cfg.fanseg_input_size,
                    face_type=FaceType.HALF)

                fanseg_rect_corner_pts = np.array(
                    [[0, 0], [cfg.fanseg_input_size - 1, 0],
                     [0, cfg.fanseg_input_size - 1]],
                    dtype=np.float32)
                a = LandmarksProcessor.transform_points(fanseg_rect_corner_pts,
                                                        half_face_fanseg_mat,
                                                        invert=True)
                b = LandmarksProcessor.transform_points(
                    a, full_face_fanseg_mat)
                m = cv2.getAffineTransform(b, fanseg_rect_corner_pts)
                FAN_dst_face_mask_a_0 = cv2.warpAffine(
                    dst_face_fanseg_mask,
                    m, (cfg.fanseg_input_size, ) * 2,
                    flags=cv2.INTER_CUBIC)
                FAN_dst_face_mask_a_0 = cv2.resize(FAN_dst_face_mask_a_0,
                                                   (output_size, output_size),
                                                   interpolation=cv2.INTER_CUBIC)
            else:
                raise ValueError("cfg.face_type unsupported")

        if cfg.mask_mode == 3:  #FAN-prd
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0
        elif cfg.mask_mode == 4:  #FAN-dst
            prd_face_mask_a_0 = FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 5:
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 6:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 7:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_dst_face_mask_a_0

    prd_face_mask_a_0[prd_face_mask_a_0 < 0.001] = 0.0

    prd_face_mask_a = prd_face_mask_a_0[..., np.newaxis]
    prd_face_mask_aaa = np.repeat(prd_face_mask_a, (3, ), axis=-1)

    img_face_mask_aaa = cv2.warpAffine(prd_face_mask_aaa,
                                       face_output_mat,
                                       img_size,
                                       np.zeros(img_bgr.shape,
                                                dtype=np.float32),
                                       flags=cv2.WARP_INVERSE_MAP
                                       | cv2.INTER_CUBIC)
    img_face_mask_aaa = np.clip(img_face_mask_aaa, 0.0, 1.0)
    img_face_mask_aaa[img_face_mask_aaa <= 0.1] = 0.0  #get rid of noise

    if 'raw' in cfg.mode:
        face_corner_pts = np.array(
            [[0, 0], [output_size - 1, 0], [output_size - 1, output_size - 1],
             [0, output_size - 1]],
            dtype=np.float32)
        square_mask = np.zeros(img_bgr.shape, dtype=np.float32)
        cv2.fillConvexPoly(square_mask, \
                           LandmarksProcessor.transform_points (face_corner_pts, face_output_mat, invert=True ).astype(np.int), \
                           (1,1,1) )

        if cfg.mode == 'raw-rgb':
            out_merging_mask = square_mask

        if cfg.mode == 'raw-rgb' or cfg.mode == 'raw-rgb-mask':
            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     out_img,
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)

        if cfg.mode == 'raw-rgb-mask':
            out_img = np.concatenate(
                [out_img,
                 np.expand_dims(img_face_mask_aaa[:, :, 0], -1)], -1)
            out_merging_mask = square_mask

        elif cfg.mode == 'raw-mask-only':
            out_img = img_face_mask_aaa
            out_merging_mask = img_face_mask_aaa
        elif cfg.mode == 'raw-predicted-only':
            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     np.zeros(img_bgr.shape, dtype=np.float32),
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_merging_mask = square_mask

        out_img = np.clip(out_img, 0.0, 1.0)
    else:
        # estimate mask extent: average [lenx, leny] over grayscale thresholds of the upscaled mask
        ar = []
        for i in range(1, 10):
            maxregion = np.argwhere(img_face_mask_aaa > i / 10.0)
            if maxregion.size != 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                if min(lenx, leny) >= 4:
                    ar += [[lenx, leny]]

        if len(ar) > 0:
            lenx, leny = np.mean(ar, axis=0)
            lowest_len = min(lenx, leny)

            if cfg.erode_mask_modifier != 0:
                ero = int(lowest_len * (0.126 - lowest_len * 0.00004551365) *
                          0.01 * cfg.erode_mask_modifier)
                if ero > 0:
                    img_face_mask_aaa = cv2.erode(img_face_mask_aaa,
                                                  cv2.getStructuringElement(
                                                      cv2.MORPH_ELLIPSE,
                                                      (ero, ero)),
                                                  iterations=1)
                elif ero < 0:
                    img_face_mask_aaa = cv2.dilate(img_face_mask_aaa,
                                                   cv2.getStructuringElement(
                                                       cv2.MORPH_ELLIPSE,
                                                       (-ero, -ero)),
                                                   iterations=1)

            if cfg.clip_hborder_mask_per > 0:  #clip hborder before blur
                prd_hborder_rect_mask_a = np.ones(prd_face_mask_a.shape,
                                                  dtype=np.float32)
                prd_border_size = int(prd_hborder_rect_mask_a.shape[1] *
                                      cfg.clip_hborder_mask_per)
                prd_hborder_rect_mask_a[:, 0:prd_border_size, :] = 0
                prd_hborder_rect_mask_a[:, -prd_border_size:, :] = 0
                prd_hborder_rect_mask_a[-prd_border_size:, :, :] = 0
                prd_hborder_rect_mask_a = np.expand_dims(
                    cv2.blur(prd_hborder_rect_mask_a,
                             (prd_border_size, prd_border_size)), -1)

                img_prd_hborder_rect_mask_a = cv2.warpAffine(
                    prd_hborder_rect_mask_a, face_output_mat, img_size,
                    np.zeros(img_bgr.shape, dtype=np.float32),
                    cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
                img_prd_hborder_rect_mask_a = np.expand_dims(
                    img_prd_hborder_rect_mask_a, -1)
                img_face_mask_aaa *= img_prd_hborder_rect_mask_a
                img_face_mask_aaa = np.clip(img_face_mask_aaa, 0, 1.0)

            if cfg.blur_mask_modifier > 0:
                blur = int(lowest_len * 0.10 * 0.01 * cfg.blur_mask_modifier)
                if blur > 0:
                    img_face_mask_aaa = cv2.blur(img_face_mask_aaa,
                                                 (blur, blur))

            img_face_mask_aaa = np.clip(img_face_mask_aaa, 0, 1.0)

            if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:  #rct
                    prd_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip((prd_face_bgr * 255).astype(np.uint8), 0, 255),
                        np.clip((dst_face_bgr * 255).astype(np.uint8), 0, 255),
                        source_mask=prd_face_mask_a,
                        target_mask=prd_face_mask_a)
                    prd_face_bgr = np.clip(
                        prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

                elif cfg.color_transfer_mode == 2:  #lct
                    prd_face_bgr = imagelib.linear_color_transfer(
                        prd_face_bgr, dst_face_bgr)
                    prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)
                elif cfg.color_transfer_mode == 3:  #mkl
                    prd_face_bgr = imagelib.color_transfer_mkl(
                        prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 4:  #mkl-m
                    prd_face_bgr = imagelib.color_transfer_mkl(
                        prd_face_bgr * prd_face_mask_a,
                        dst_face_bgr * prd_face_mask_a)
                elif cfg.color_transfer_mode == 5:  #idt
                    prd_face_bgr = imagelib.color_transfer_idt(
                        prd_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 6:  #idt-m
                    prd_face_bgr = imagelib.color_transfer_idt(
                        prd_face_bgr * prd_face_mask_a,
                        dst_face_bgr * prd_face_mask_a)

                elif cfg.color_transfer_mode == 7:  #ebs
                    prd_face_bgr = cfg.ebs_ct_func(
                        np.clip((dst_face_bgr * 255), 0, 255).astype(np.uint8),
                        np.clip((prd_face_bgr * 255), 0, 255).astype(np.uint8),
                    )  #prd_face_mask_a
                    prd_face_bgr = np.clip(
                        prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
                prd_face_bgr = np.repeat(np.expand_dims(prd_face_bgr, -1),
                                         (3, ), -1)

            if cfg.mode == 'hist-match' or cfg.mode == 'hist-match-bw':
                hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                      dtype=np.float32)

                if cfg.masked_hist_match:
                    hist_mask_a *= prd_face_mask_a

                white = (1.0 - hist_mask_a) * np.ones(
                    prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)

                hist_match_1 = prd_face_bgr * hist_mask_a + white
                hist_match_1[hist_match_1 > 1.0] = 1.0

                hist_match_2 = dst_face_bgr * hist_mask_a + white
                hist_match_2[hist_match_2 > 1.0] = 1.0

                prd_face_bgr = imagelib.color_hist_match(
                    hist_match_1, hist_match_2, cfg.hist_match_threshold)

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

            if 'seamless' in cfg.mode:
                #mask used for cv2.seamlessClone
                img_face_mask_a = img_face_mask_aaa[..., 0:1]

                if cfg.mode == 'seamless2':
                    img_face_mask_a = cv2.warpAffine(
                        img_face_mask_a,
                        face_output_mat, (output_size, output_size),
                        flags=cv2.INTER_CUBIC)

                img_face_seamless_mask_a = None
                for i in range(1, 10):
                    a = img_face_mask_a > i / 10.0
                    if len(np.argwhere(a)) == 0:
                        continue
                    img_face_seamless_mask_a = img_face_mask_a.copy()
                    img_face_seamless_mask_a[a] = 1.0
                    img_face_seamless_mask_a[img_face_seamless_mask_a <= i /
                                             10.0] = 0.0
                    break

            if cfg.mode == 'seamless2':

                face_seamless = imagelib.seamless_clone(
                    prd_face_bgr, dst_face_bgr, img_face_seamless_mask_a)

                out_img = cv2.warpAffine(
                    face_seamless, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                    cv2.BORDER_TRANSPARENT)
            else:
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                    cv2.BORDER_TRANSPARENT)

            out_img = np.clip(out_img, 0.0, 1.0)

            if 'seamless' in cfg.mode and cfg.mode != 'seamless2':
                try:
                    #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                    l, t, w, h = cv2.boundingRect(
                        (img_face_seamless_mask_a * 255).astype(np.uint8))
                    s_maskx, s_masky = int(l + w / 2), int(t + h / 2)
                    out_img = cv2.seamlessClone(
                        (out_img * 255).astype(np.uint8), img_bgr_uint8,
                        (img_face_seamless_mask_a * 255).astype(np.uint8),
                        (s_maskx, s_masky), cv2.NORMAL_CLONE)
                    out_img = out_img.astype(dtype=np.float32) / 255.0
                except Exception as e:
                    #seamlessClone may fail in some cases
                    e_str = traceback.format_exc()

                    if 'MemoryError' in e_str:
                        raise Exception(
                            "Seamless fail: " + e_str
                        )  #reraise MemoryError in order to reprocess this data by other processes
                    else:
                        print("Seamless fail: " + e_str)

            out_img = img_bgr * (1 - img_face_mask_aaa) + (out_img *
                                                           img_face_mask_aaa)

            out_face_bgr = cv2.warpAffine(out_img, face_mat,
                                          (output_size, output_size))

            if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:
                    face_mask_aaa = cv2.warpAffine(img_face_mask_aaa, face_mat,
                                                   (output_size, output_size))

                    out_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip((out_face_bgr * 255), 0, 255).astype(np.uint8),
                        np.clip((dst_face_bgr * 255), 0, 255).astype(np.uint8),
                        source_mask=face_mask_aaa,
                        target_mask=face_mask_aaa)
                    out_face_bgr = np.clip(
                        out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                elif cfg.color_transfer_mode == 2:  #lct
                    out_face_bgr = imagelib.linear_color_transfer(
                        out_face_bgr, dst_face_bgr)
                    out_face_bgr = np.clip(out_face_bgr, 0.0, 1.0)
                elif cfg.color_transfer_mode == 3:  #mkl
                    out_face_bgr = imagelib.color_transfer_mkl(
                        out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 4:  #mkl-m
                    out_face_bgr = imagelib.color_transfer_mkl(
                        out_face_bgr * prd_face_mask_a,
                        dst_face_bgr * prd_face_mask_a)
                elif cfg.color_transfer_mode == 5:  #idt
                    out_face_bgr = imagelib.color_transfer_idt(
                        out_face_bgr, dst_face_bgr)
                elif cfg.color_transfer_mode == 6:  #idt-m
                    out_face_bgr = imagelib.color_transfer_idt(
                        out_face_bgr * prd_face_mask_a,
                        dst_face_bgr * prd_face_mask_a)
                elif cfg.color_transfer_mode == 7:  #ebs
                    out_face_bgr = cfg.ebs_ct_func(
                        np.clip((dst_face_bgr * 255), 0, 255).astype(np.uint8),
                        np.clip((out_face_bgr * 255), 0, 255).astype(np.uint8),
                    )
                    out_face_bgr = np.clip(
                        out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

            if cfg.mode == 'seamless-hist-match':
                out_face_bgr = imagelib.color_hist_match(
                    out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

            cfg_mp = cfg.motion_blur_power / 100.0
            if cfg_mp != 0:
                k_size = int(frame_info.motion_power * cfg_mp)
                if k_size >= 1:
                    k_size = np.clip(k_size + 1, 2, 50)
                    if cfg.super_resolution_mode:
                        k_size *= 2
                    out_face_bgr = imagelib.LinearMotionBlur(
                        out_face_bgr, k_size, frame_info.motion_deg)

            if cfg.blursharpen_amount != 0:
                out_face_bgr = cfg.blursharpen_func(out_face_bgr,
                                                    cfg.sharpen_mode, 3,
                                                    cfg.blursharpen_amount)

            new_out = cv2.warpAffine(out_face_bgr, face_mat, img_size,
                                     img_bgr.copy(),
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_img = np.clip(
                img_bgr * (1 - img_face_mask_aaa) +
                (new_out * img_face_mask_aaa), 0, 1.0)

            if cfg.color_degrade_power != 0:
                out_img_reduced = imagelib.reduce_colors(out_img, 256)
                if cfg.color_degrade_power == 100:
                    out_img = out_img_reduced
                else:
                    alpha = cfg.color_degrade_power / 100.0
                    out_img = (out_img * (1.0 - alpha) +
                               out_img_reduced * alpha)

            if cfg.export_mask_alpha:
                out_img = np.concatenate(
                    [out_img, img_face_mask_aaa[:, :, 0:1]], -1)
        out_merging_mask = img_face_mask_aaa

    return out_img, out_merging_mask
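The recurring compositing move in this converter warps the face-space result back into the frame with WARP_INVERSE_MAP and blends it through the warped mask; isolated with dummy data:

import cv2
import numpy as np

frame = np.random.rand(480, 640, 3).astype(np.float32)
face = np.random.rand(256, 256, 3).astype(np.float32)   # stand-in for prd_face_bgr
mask = np.ones((256, 256, 3), np.float32)                # stand-in for the face-space mask
mat = np.float32([[0.5, 0, 100], [0, 0.5, 80]])          # stand-in for face_output_mat

face_in_frame = cv2.warpAffine(face, mat, (640, 480), frame.copy(),
                               flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                               borderMode=cv2.BORDER_TRANSPARENT)
mask_in_frame = cv2.warpAffine(mask, mat, (640, 480),
                               flags=cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
out = np.clip(frame * (1 - mask_in_frame) + face_in_frame * mask_in_frame, 0, 1)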
Code example #43
0
File: Reshape.py Project: dpsidelnikov/NBA-faces
for image in images:
    print("Working for " + image)
    img = cv2.imread(base + image)
    dets = detector(img, 1)
    rows, cols, ch = img.shape
    for k, d in enumerate(dets):

        # Get the landmarks/parts for the face in box d.
        shape = predictor(img, d)
        sl_eyes = [0, 0]
        sr_eyes = [0, 0]
        s_mouth = [0, 0]
        s_nose = [0, 0]

        sl_eyes, sr_eyes, s_mouth, s_nose = get_params(img, d, sl_eyes,
                                                       sr_eyes, s_mouth,
                                                       s_nose)
        s_mouth = [int(0.5 * x) for x in s_mouth]
        pts1 = np.float32([sl_eyes, sr_eyes, s_mouth])
        pts2 = np.float32([l_eyes, r_eyes, mouth])

        # for i in range(68):
        # 	# cv2.circle(img,(shape.part(i).x,shape.part(i).y),4,(0,0,255))
        # 	cv2.circle(img,(s_mouth[0],s_mouth[1]),4,(0,0,255))

        print(sl_eyes, sr_eyes, s_mouth)
        print(l_eyes, r_eyes, mouth)
        M = cv2.getAffineTransform(pts1, pts2)
        dst = cv2.warpAffine(img, M, (cols, rows))
        cv2.imwrite("Worked/" + image, dst)
Code example #44
0
    def predict(self, car_pic):
        if type(car_pic) == type(""):
            img = imreadex(car_pic)
        else:
            img = car_pic
        pic_hight, pic_width = img.shape[:2]

        if pic_width > MAX_WIDTH:
            resize_rate = MAX_WIDTH / pic_width
            img = cv2.resize(img, (MAX_WIDTH, int(pic_hight * resize_rate)),
                             interpolation=cv2.INTER_AREA)

        blur = self.cfg["blur"]
        # Gaussian denoising
        if blur > 0:
            img = cv2.GaussianBlur(img, (blur, blur), 0)  # kernel tuned to the image resolution
        oldimg = img
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        #equ = cv2.equalizeHist(img)
        #img = np.hstack((img, equ))
        # remove regions of the image that cannot be the plate
        kernel = np.ones((20, 20), np.uint8)
        img_opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img_opening = cv2.addWeighted(img, 1, img_opening, -1, 0)

        # find the image edges
        ret, img_thresh = cv2.threshold(img_opening, 0, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        img_edge = cv2.Canny(img_thresh, 100, 200)
        # use opening and closing so the edges merge into connected regions
        kernel = np.ones((self.cfg["morphologyr"], self.cfg["morphologyc"]),
                         np.uint8)
        img_edge1 = cv2.morphologyEx(img_edge, cv2.MORPH_CLOSE, kernel)
        img_edge2 = cv2.morphologyEx(img_edge1, cv2.MORPH_OPEN, kernel)

        # find rectangular regions formed by the edges; there may be many, and the plate is inside one of them
        image, contours, hierarchy = cv2.findContours(img_edge2, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)
        contours = [cnt for cnt in contours if cv2.contourArea(cnt) > Min_Area]
        print('len(contours)', len(contours))
        # rule out, one by one, rectangles that cannot be a plate
        car_contours = []
        for cnt in contours:
            rect = cv2.minAreaRect(cnt)
            area_width, area_height = rect[1]
            if area_width < area_height:
                area_width, area_height = area_height, area_width
            wh_ratio = area_width / area_height
            #print(wh_ratio)
            # require an aspect ratio between 2 and 5.5 (the range for plates); discard the rest
            if wh_ratio > 2 and wh_ratio < 5.5:
                car_contours.append(rect)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                #oldimg = cv2.drawContours(oldimg, [box], 0, (0, 0, 255), 2)
                #cv2.imshow("edge4", oldimg)
                #print(rect)

        print(len(car_contours))

        print("精确定位")
        card_imgs = []
        # the rectangles may be tilted and must be deskewed before color localization
        for rect in car_contours:
            if rect[2] > -1 and rect[2] < 1:  # force a nonzero angle so the left/high/right/low points are assigned correctly
                angle = 1
            else:
                angle = rect[2]
            rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle
                    )  # enlarge the rect so the plate edges are not clipped

            box = cv2.boxPoints(rect)
            heigth_point = right_point = [0, 0]
            left_point = low_point = [pic_width, pic_hight]
            for point in box:
                if left_point[0] > point[0]:
                    left_point = point
                if low_point[1] > point[1]:
                    low_point = point
                if heigth_point[1] < point[1]:
                    heigth_point = point
                if right_point[0] < point[0]:
                    right_point = point

            if left_point[1] <= right_point[1]:  # positive angle
                new_right_point = [right_point[0], heigth_point[1]]
                pts2 = np.float32([left_point, heigth_point,
                                   new_right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(new_right_point)
                point_limit(heigth_point)
                point_limit(left_point)
                card_img = dst[int(left_point[1]):int(heigth_point[1]),
                               int(left_point[0]):int(new_right_point[0])]
                card_imgs.append(card_img)
                #cv2.imshow("card", card_img)
                #cv2.waitKey(0)
            elif left_point[1] > right_point[1]:  # negative angle

                new_left_point = [left_point[0], heigth_point[1]]
                pts2 = np.float32([new_left_point, heigth_point,
                                   right_point])  # only the character height needs to change
                pts1 = np.float32([left_point, heigth_point, right_point])
                M = cv2.getAffineTransform(pts1, pts2)
                dst = cv2.warpAffine(oldimg, M, (pic_width, pic_hight))
                point_limit(right_point)
                point_limit(heigth_point)
                point_limit(new_left_point)
                card_img = dst[int(right_point[1]):int(heigth_point[1]),
                               int(new_left_point[0]):int(right_point[0])]
                card_imgs.append(card_img)
                #cv2.imshow("card", card_img)
                #cv2.waitKey(0)
        # use color to discard non-plate rectangles; only blue, green and yellow plates are recognized
        colors = []
        for card_index, card_img in enumerate(card_imgs):
            green = yello = blue = black = white = 0
            card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
            # the conversion may fail if the deskewing above went wrong
            if card_img_hsv is None:
                continue
            row_num, col_num = card_img_hsv.shape[:2]
            card_img_count = row_num * col_num

            for i in range(row_num):
                for j in range(col_num):
                    H = card_img_hsv.item(i, j, 0)
                    S = card_img_hsv.item(i, j, 1)
                    V = card_img_hsv.item(i, j, 2)
                    if 11 < H <= 34 and S > 34:  # thresholds tuned for the adjusted image resolution
                        yello += 1
                    elif 35 < H <= 99 and S > 34:  # thresholds tuned for the adjusted image resolution
                        green += 1
                    elif 99 < H <= 124 and S > 34:  # thresholds tuned for the adjusted image resolution
                        blue += 1

                    if 0 < H < 180 and 0 < S < 255 and 0 < V < 46:
                        black += 1
                    elif 0 < H < 180 and 0 < S < 43 and 221 < V < 225:
                        white += 1
            color = "no"

            limit1 = limit2 = 0
            if yello * 2 >= card_img_count:
                color = "yello"
                limit1 = 11
                limit2 = 34  # some images have a greenish color cast
            elif green * 2 >= card_img_count:
                color = "green"
                limit1 = 35
                limit2 = 99
            elif blue * 2 >= card_img_count:
                color = "blue"
                limit1 = 100
                limit2 = 124  # some images have a purplish color cast
            elif black + white >= card_img_count * 0.7:  #TODO
                color = "bw"
            print(color)
            colors.append(color)
            print(blue, green, yello, black, white, card_img_count)
            #cv2.imshow("color", card_img)
            #cv2.waitKey(0)
            if limit1 == 0:
                continue
            # above: determine the plate color
            # below: re-locate by plate color to shrink non-plate borders
            xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1, limit2,
                                                 color)
            if yl == yh and xl == xr:
                continue
            need_accurate = False
            if yl >= yh:
                yl = 0
                yh = row_num
                need_accurate = True
            if xl >= xr:
                xl = 0
                xr = col_num
                need_accurate = True
            card_imgs[card_index] = card_img[
                yl:yh, xl:xr] if color != "green" or yl < (
                    yh - yl) // 4 else card_img[yl - (yh - yl) // 4:yh, xl:xr]
            if need_accurate:  # the x or y direction may not have shrunk; try once more
                card_img = card_imgs[card_index]
                card_img_hsv = cv2.cvtColor(card_img, cv2.COLOR_BGR2HSV)
                xl, xr, yh, yl = self.accurate_place(card_img_hsv, limit1,
                                                     limit2, color)
                if yl == yh and xl == xr:
                    continue
                if yl >= yh:
                    yl = 0
                    yh = row_num
                if xl >= xr:
                    xl = 0
                    xr = col_num
            card_imgs[card_index] = card_img[
                yl:yh, xl:xr] if color != "green" or yl < (
                    yh - yl) // 4 else card_img[yl - (yh - yl) // 4:yh, xl:xr]
        # above: plate localization
        # below: recognize the characters on the plate
        predict_result = []
        roi = None
        card_color = None
        for i, color in enumerate(colors):
            if color in ("blue", "yello", "green"):
                card_img = card_imgs[i]
                gray_img = cv2.cvtColor(card_img, cv2.COLOR_BGR2GRAY)
                # yellow/green plates have characters darker than the background (opposite of blue), so invert them
                if color == "green" or color == "yello":
                    gray_img = cv2.bitwise_not(gray_img)
                ret, gray_img = cv2.threshold(
                    gray_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # find peaks in the horizontal projection histogram
                x_histogram = np.sum(gray_img, axis=1)
                x_min = np.min(x_histogram)
                x_average = np.sum(x_histogram) / x_histogram.shape[0]
                x_threshold = (x_min + x_average) / 2
                wave_peaks = find_waves(x_threshold, x_histogram)
                if len(wave_peaks) == 0:
                    print("peak less 0:")
                    continue
                # take the largest horizontal peak as the plate's vertical extent
                wave = max(wave_peaks, key=lambda x: x[1] - x[0])
                gray_img = gray_img[wave[0]:wave[1]]
                # find peaks in the vertical projection histogram
                row_num, col_num = gray_img.shape[:2]
                # trim 1 pixel off the top and bottom so white borders do not skew the threshold
                gray_img = gray_img[1:row_num - 1]
                y_histogram = np.sum(gray_img, axis=0)
                y_min = np.min(y_histogram)
                y_average = np.sum(y_histogram) / y_histogram.shape[0]
                y_threshold = (y_min + y_average) / 5  # U and 0 need a lower threshold, otherwise they get split in half

                wave_peaks = find_waves(y_threshold, y_histogram)

                #for wave in wave_peaks:
                #	cv2.line(card_img, pt1=(wave[0], 5), pt2=(wave[1], 5), color=(0, 0, 255), thickness=2)
                # a plate should have more than 6 characters
                if len(wave_peaks) <= 6:
                    print("peak less 1:", len(wave_peaks))
                    continue

                wave = max(wave_peaks, key=lambda x: x[1] - x[0])
                max_wave_dis = wave[1] - wave[0]
                # check whether the first peak is the left plate border
                if wave_peaks[0][1] - wave_peaks[0][
                        0] < max_wave_dis / 3 and wave_peaks[0][0] == 0:
                    wave_peaks.pop(0)

                # merge the separated strokes of the leading Chinese character
                cur_dis = 0
                for i, wave in enumerate(wave_peaks):
                    if wave[1] - wave[0] + cur_dis > max_wave_dis * 0.6:
                        break
                    else:
                        cur_dis += wave[1] - wave[0]
                if i > 0:
                    wave = (wave_peaks[0][0], wave_peaks[i][1])
                    wave_peaks = wave_peaks[i + 1:]
                    wave_peaks.insert(0, wave)

                # remove the separator dot on the plate
                point = wave_peaks[2]
                if point[1] - point[0] < max_wave_dis / 3:
                    point_img = gray_img[:, point[0]:point[1]]
                    if np.mean(point_img) < 255 / 5:
                        wave_peaks.pop(2)

                if len(wave_peaks) <= 6:
                    print("peak less 2:", len(wave_peaks))
                    continue
                part_cards = seperate_card(gray_img, wave_peaks)
                for i, part_card in enumerate(part_cards):
                    # may be a rivet holding the plate
                    if np.mean(part_card) < 255 / 5:
                        print("a point")
                        continue
                    part_card_old = part_card
                    w = abs(part_card.shape[1] - SZ) // 2

                    part_card = cv2.copyMakeBorder(part_card,
                                                   0,
                                                   0,
                                                   w,
                                                   w,
                                                   cv2.BORDER_CONSTANT,
                                                   value=[0, 0, 0])
                    part_card = cv2.resize(part_card, (SZ, SZ),
                                           interpolation=cv2.INTER_AREA)

                    #part_card = deskew(part_card)
                    part_card = preprocess_hog([part_card])
                    if i == 0:
                        resp = self.modelchinese.predict(part_card)
                        charactor = provinces[int(resp[0]) - PROVINCE_START]
                    else:
                        resp = self.model.predict(part_card)
                        charactor = chr(resp[0])
                    # check whether the last character is actually the plate border, which may be read as "1"
                    if charactor == "1" and i == len(part_cards) - 1:
                        if part_card_old.shape[0] / part_card_old.shape[
                                1] >= 7:  # too thin to be a "1"; treat it as the plate border
                            continue
                    predict_result.append(charactor)
                roi = card_img
                card_color = color
                break

        return predict_result, roi, card_color  # recognized characters, localized plate image, plate color
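The deskewing step in predict() fixes two box corners and moves only one, so the affine reduces to a vertical shear that squares up the plate. In isolation (coordinates illustrative; the original heigth_point spelling is kept):

import cv2
import numpy as np

left_point = [100.0, 120.0]     # leftmost box corner
heigth_point = [180.0, 200.0]   # lowest ("highest-y") box corner
right_point = [300.0, 150.0]    # rightmost box corner

new_right_point = [right_point[0], heigth_point[1]]  # only the height changes
pts1 = np.float32([left_point, heigth_point, right_point])
pts2 = np.float32([left_point, heigth_point, new_right_point])
M = cv2.getAffineTransform(pts1, pts2)
img = np.zeros((480, 640, 3), np.uint8)  # stand-in for oldimg
dst = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))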
Code example #45
0
def update(val):
    global pt1x, pt1y, pt2x, pt2y, pt3x, pt3y, ptAllx, ptAlly, ptAllRp, ptAllRn, loading_settings, evx, evy, imgR, imgL, img2
    pt1x = spt1x.val
    pt1y = spt1y.val
    pt2x = spt2x.val
    pt2y = spt2y.val
    pt3x = spt3x.val
    pt3y = spt3y.val
    ptAllx = sptAllx.val
    ptAlly = sptAlly.val
    ptAllRp = sptAllRp.val
    ptAllRn = sptAllRn.val
    if (loading_settings == 0):
        #print ('Rebuilding Affine')
        #disparity = stereo_depth_map(rectified_pair)
        #print(imgR.shape)
        rows, cols, _rgb = imgR.shape
        if val == 4:
            ret, imgL = cap.read()
            ret2, imgR = cap2.read()
        if val == 2:
            img2 = cv2.warpAffine(
                imgR,
                cv2.getAffineTransform(
                    np.float32([[0, 0], [rows / 2, cols / 2], [rows, 0]]),
                    np.float32([[0, 0], [evy, evx], [rows, 0]]),
                    #np.float32([[ptAlly+pt1y,ptAllx+pt1x],[ptAlly+pt2y,ptAllx+pt2x+cols],[ptAlly+pt3y+rows,ptAllx+pt3x]])
                ),
                (cols, rows))
        else:  # val == 3
            imgLwl = imgL.copy()
            cv2.line(imgLwl, (0, 0), (cols, rows), (0, 255, 0), 1)
            cv2.line(imgLwl, (0, rows), (cols, 0), (0, 255, 0), 1)
            imgLwl = imgLwl[:, :, ::-1]
            img1Object.set_data(imgLwl)
            figL.canvas.draw()
            #
            x1 = 0.0
            y1 = 0.0
            x2 = float(cols)
            y2 = 0.0
            x3 = 0.0
            y3 = float(rows)
            x0 = float((x2 + x3) / 2)
            y0 = float((y3 + y2) / 2)
            print((x1 - x0) * cos(radians(ptAllRp)))
            print((y1 - y0) * sin(radians(ptAllRp)))
            nx1 = (x1 - x0) * cos(radians(ptAllRp)) - (y1 - y0) * sin(
                radians(ptAllRp)) + x0
            ny1 = (x1 - x0) * sin(radians(ptAllRp)) + (y1 - y0) * cos(
                radians(ptAllRp)) + y0
            nx2 = (x2 - x0) * cos(radians(ptAllRp)) - (y2 - y0) * sin(
                radians(ptAllRp)) + x0
            ny2 = (x2 - x0) * sin(radians(ptAllRp)) + (y2 - y0) * cos(
                radians(ptAllRp)) + y0
            nx3 = (x3 - x0) * cos(radians(ptAllRp)) - (y3 - y0) * sin(
                radians(ptAllRp)) + x0
            ny3 = (x3 - x0) * sin(radians(ptAllRp)) + (y3 - y0) * cos(
                radians(ptAllRp)) + y0
            #print(cols,rows)
            print(x1, y1, x2, y2, x3, y3, cos(radians(ptAllRp)),
                  sin(radians(ptAllRp)), x0, y0)
            x1 = nx1 + pt1x + ptAllx
            y1 = ny1 + pt1y + ptAlly
            x2 = nx2 + pt2x + ptAllx
            y2 = ny2 + pt2y + ptAlly
            x3 = nx3 + pt3x + ptAllx
            y3 = ny3 + pt3y + ptAlly
            img2 = cv2.warpAffine(
                imgR,
                cv2.getAffineTransform(
                    np.float32([[0, 0], [cols, 0], [0, rows]]),
                    np.float32([[x1, y1], [x2, y2], [x3, y3]])), (cols, rows))
        #else:
        #    img2 = cv2.warpAffine(imgR,cv2.getAffineTransform(
        #    np.float32([[0,0],[0,cols],[rows,0]]),
        #    np.float32([[ptAlly+pt1y,ptAllx+pt1x],[ptAlly+pt2y,ptAllx+pt2x+cols],[ptAlly+pt3y+rows,ptAllx+pt3x]])
        #    ),(cols,rows))
        img2wl = img2.copy()
        cv2.line(img2wl, (0, 0), (cols, rows), (0, 255, 0), 1)
        cv2.line(img2wl, (0, rows), (cols, 0), (0, 255, 0), 1)
        img2wl = img2wl[:, :, ::-1]
        img2Object.set_data(img2wl)
        #cv2.imshow("in2", img2)
        #cv2.waitKey()
        #cv2.destroyAllWindows()
        #print ('Redraw Affine')
        fig1.canvas.draw()
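For reference, the manual cos/sin update in the val == 3 branch is a standard 2D rotation of the three warp corners about the image center; a sketch of the same corner rotation via cv2.getRotationMatrix2D (note OpenCV treats positive angles as counter-clockwise in image coordinates, so the sign may need flipping to match ptAllRp):

import cv2
import numpy as np

def rotate_corners(cols, rows, angle_deg):
    # the three source corners used by the warp above
    corners = np.float32([[0, 0], [cols, 0], [0, rows]])
    center = (cols / 2.0, rows / 2.0)  # same midpoint as x0, y0 above
    M = cv2.getRotationMatrix2D(center, angle_deg, 1.0)
    # apply the 2x3 matrix to each corner in homogeneous form
    ones = np.ones((3, 1), dtype=np.float32)
    return np.hstack([corners, ones]) @ M.T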
Code example #46
        if key == 32:
            break

print('select three points on image 1')
coordinates1 = []
points('MRIF.png', coordinates1)
# build a float32 array from the clicked points
coordinates1 = np.array(coordinates1, dtype='float32')

print('select three points on image 2')
coordinates2 = []
points('MRIS.png', coordinates2)
# build a float32 array from the clicked points
coordinates2 = np.array(coordinates2, dtype='float32')

# compute the affine transform that maps image 2 onto image 1
Affine = cv.getAffineTransform(coordinates2, coordinates1)
print(Affine)

# apply the transform to the image
img = cv.imread('MRIS.png')
affine_transform_image = cv.warpAffine(img, Affine, (1000, 1000))
cv.imshow('new image', affine_transform_image)
cv.waitKey(0)

cv.destroyAllWindows()
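The points() helper is not shown in this excerpt; a plausible sketch, assuming it collects three left-clicks on the named image (only the name and the two-argument call come from the script above, the body is an assumption):

import cv2 as cv

def points(image_path, coords, n=3):
    img = cv.imread(image_path)

    def on_mouse(event, x, y, flags, param):
        if event == cv.EVENT_LBUTTONDOWN and len(coords) < n:
            coords.append([x, y])

    cv.namedWindow('select points')
    cv.setMouseCallback('select points', on_mouse)
    while len(coords) < n:
        cv.imshow('select points', img)
        if cv.waitKey(20) & 0xFF == 27:  # Esc aborts early
            break
    cv.destroyWindow('select points')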

Code example #47
def getImageFromWord(word):
    word = changeCase(word)

    height, width = 512, 512
    img = np.zeros((height, width, 3), np.uint8)
    img[:, :, :] = 255

    # half the time: a random Hershey font with the italic flag (16); otherwise plain FONT_HERSHEY_SIMPLEX
    font = (random.choice([0, 1, 2, 3, 4]) | 16) if random.randint(0, 1) else 0
    bottomLeftCornerOfText = (30, 150)
    fontScale = 5
    fontColor = (0, 0, 0)
    thickness = 6
    lineType = 8

    while True:
        textsize = cv2.getTextSize(word, font, fontScale, lineType)[0]
        if textsize[0] < width - 20:
            break
        else:
            fontScale -= 1

    # print textsize

    # get integer coords based on the boundary (cv2.putText expects ints)
    textX = (img.shape[1] - textsize[0]) // 2
    textY = (img.shape[0] + textsize[1]) // 2

    # add text centered on image
    cv2.putText(img, word, (textX, textY), font, fontScale, fontColor,
                lineType)

    rotateFlag = random.randint(0, 1)
    if rotateFlag:
        rotateAngle = random.randint(-10, 10)
        M = cv2.getRotationMatrix2D((width / 2, height / 2), rotateAngle, 1)
        img = cv2.warpAffine(img,
                             M, (width, height),
                             borderValue=(255, 255, 255))

    affineFlag = random.randint(0, 1)
    if affineFlag:
        pts1 = np.float32([[10, 10], [200, 50], [50, 200]])
        pts2 = np.float32(
            [[10 + random.randint(-20, 20), 30 + random.randint(-20, 20)],
             [200, 50],
             [50 + random.randint(-20, 20), 200 + random.randint(-20, 20)]])

        M = cv2.getAffineTransform(pts1, pts2)
        img = cv2.warpAffine(img,
                             M, (width, height),
                             borderValue=(255, 255, 255))

    img = cv2.resize(img, (227, 227))
    bg_image = get_random_crop()
    bg_image = merge_background_text(img, bg_image)
    # print(bg_image.shape)
    # img = np.add(img,bg_image)
    # plt.imshow(img)
    # print img.shape

    return bg_image
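get_random_crop() and merge_background_text() are defined elsewhere in the original; a plausible sketch of the merge step, assuming dark text on a light canvas is composited onto the crop by a per-pixel minimum (both the behaviour and the signature are assumptions):

import numpy as np

def merge_background_text(text_img, bg_img):
    # the darker pixel wins, so black strokes survive on any background;
    # assumes both images share the same shape and dtype
    return np.minimum(text_img, bg_img)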
Code example #48
                               cv2.BORDER_REPLICATE)

    src = np.array([
        (cols * (20.0 / 512.0) + wexp, rows * (200.0 / 512.0) + hexp),
        (cols * (256.0 / 512.0) + wexp, rows * (495.0 / 512.0) + hexp),
        (cols * (492.0 / 512.0) + wexp, rows * (200.0 / 512.0) + hexp)
    ])
    #src = np.array([(20, 200), (256, 495), (492, 200)])
    src = np.float32(src)  # cv2.getAffineTransform expects float32 points
    dest = np.array([(shapes.part(0).x - box.left() + wexp,
                      shapes.part(0).y - box.top() + hexp),
                     (shapes.part(8).x - box.left() + wexp,
                      shapes.part(8).y - box.top() + hexp),
                     (shapes.part(16).x - box.left() + wexp,
                      shapes.part(16).y - box.top() + hexp)])
    dest = np.float32(dest)
    rows, cols, d = emoji.shape
    trans = cv2.getAffineTransform(src, dest)
    emoji = cv2.warpAffine(emoji, trans, (cols, rows))

    #print(happy)
    for c in range(0, 3):
        img[box.top() - hexp:box.bottom() + hexp,
            box.left() - wexp:box.right() + wexp,
            c] = emoji[:, :, c] * (emoji[:, :, 3] / 255.0) + img[
                box.top() - hexp:box.bottom() + hexp,
                box.left() - wexp:box.right() + wexp,
                c] * (1.0 - emoji[:, :, 3] / 255.0)
plt.imsave('s.jpg', img)
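The per-channel loop above blends the warped BGRA emoji over the face region; the same composite can be written as a single vectorized blend. A sketch, assuming emoji is BGRA and roi is the matching BGR region:

import numpy as np

def alpha_blend(roi, emoji):
    alpha = emoji[:, :, 3:4] / 255.0  # shape (h, w, 1), broadcasts over BGR
    blended = emoji[:, :, :3] * alpha + roi * (1.0 - alpha)
    return blended.astype(roi.dtype)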
Code example #49
def rotate(img, angle, fixed=True, point=None, scale=1.0):
        """
        Inspired from SimpleCV

        **PARAMETERS**

        * *angle* - angle in degrees; positive is clockwise, negative is counter-clockwise.
        * *point* - the point about which we want to rotate; if none is defined we use the center.
        * *scale* - an optional floating point scale parameter.

        """
        # default to the image center (a mutable default argument would be
        # shared across calls, so None is used as the sentinel instead)
        if point is None or point[0] == -1 or point[1] == -1:
            point = [(img.shape[1] - 1) / 2, (img.shape[0] - 1) / 2]

        # first we create what we think the rotation matrix should be
        rotMat = cv2.getRotationMatrix2D((float(point[0]), float(point[1])), float(angle), float(scale))
        if fixed:
            # dsize must be a (width, height) pair, not the raw shape tuple
            return cv2.warpAffine(img, M=rotMat, dsize=(img.shape[1], img.shape[0]))

        A = np.array([0, 0, 1])
        B = np.array([img.shape[1], 0, 1])
        C = np.array([img.shape[1], img.shape[0], 1])
        D = np.array([0, img.shape[0], 1])
        #So we have defined our image corners ABCD in homogeneous coordinates
        #and apply the rotation so we can figure out the image size
        a = np.dot(rotMat, A)
        b = np.dot(rotMat, B)
        c = np.dot(rotMat, C)
        d = np.dot(rotMat, D)
        #I am not sure about this but I think the a/b/c/d are transposed
        #now we calculate the extents of the rotated components.
        minY = min(a[1], b[1], c[1], d[1])
        minX = min(a[0], b[0], c[0], d[0])
        maxY = max(a[1], b[1], c[1], d[1])
        maxX = max(a[0], b[0], c[0], d[0])
        #from the extents we calculate the new size
        newWidth = np.ceil(maxX-minX)
        newHeight = np.ceil(maxY-minY)
        #now we calculate a new translation
        tX = 0
        tY = 0
        #calculate the translation that will get us centered in the new image
        if( minX < 0 ):
            tX = -1.0*minX
        elif(maxX > newWidth-1 ):
            tX = -1.0*(maxX-newWidth)

        if( minY < 0 ):
            tY = -1.0*minY
        elif(maxY > newHeight-1 ):
            tY = -1.0*(maxY-newHeight)

        #now we construct an affine map that performs the rotation and scaling we want
        #with the corners all lined up nicely with the output image.
        src = np.float32([(A[0], A[1]), (B[0], B[1]), (C[0], C[1])])
        dst = np.float32([(a[0]+tX, a[1]+tY), (b[0]+tX, b[1]+tY), (c[0]+tX, c[1]+tY)])

        rotMat = cv2.getAffineTransform(src, dst)

        #calculate the translation of the corners to center the image
        #use these new corner positions as the input to cvGetAffineTransform
        retVal = cv2.warpAffine(img, rotMat, (int(newWidth), int(newHeight)))
        return retVal
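A short usage sketch for rotate(): with fixed=True the output keeps the input size and the corners get clipped, while fixed=False grows the canvas to the rotated extents computed above (the image path is a placeholder):

import cv2

img = cv2.imread('images/input.jpg')  # placeholder path
clipped = rotate(img, 30)                # same size, corners clipped
expanded = rotate(img, 30, fixed=False)  # canvas grown to fit
cv2.imshow('clipped', clipped)
cv2.imshow('expanded', expanded)
cv2.waitKey()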
Code example #50
import cv2
import numpy as np

img = cv2.imread('images/input.jpg')
rows, cols = img.shape[:2]

src_points = np.float32([[0,0], [cols-1,0], [0,rows-1]])
dst_points = np.float32([[0,0], [int(0.6*(cols-1)),0], [int(0.4*(cols-1)),rows-1]])

affine_matrix = cv2.getAffineTransform(src_points, dst_points)
img_output = cv2.warpAffine(img, affine_matrix, (cols,rows))

cv2.imshow('Input', img)
cv2.imshow('Output', img_output)
cv2.waitKey()
Code example #51
File: tp7_a.py  Project: danteruizv8/vision_G2_2020
def imgafin(imagen, r_altura, r_ancho):
    # pto1, pto2, pto3 are source points defined at module level elsewhere in the project
    inicio = np.array([pto1, pto2, pto3], dtype=np.float32)
    destino = np.array([(0, r_altura), (0, 0), (r_ancho, 0)], dtype=np.float32)
    matriz = cv2.getAffineTransform(inicio, destino)
    result = cv2.warpAffine(imagen, matriz, (r_ancho, r_altura))
    return result
Code example #52
            in the output image.
"""
# Make a copy of the image to show the points used for the affine transformation.
image_points = image.copy()
cv2.circle(image_points, (100, 45), 5, (255, 0, 255), -1)
cv2.circle(image_points, (385, 45), 5, (255, 0, 255), -1)
cv2.circle(image_points, (275, 230), 5, (255, 0, 255), -1)
show_with_matplotlib(image_points, 'Before the affine transformation')

# Build the matrices with the 3 points used above, plus matrices
# holding their desired positions in the output image.
pts_1 = np.float32([[100, 45], [385, 45], [275, 230]])
pts_2 = np.float32([[50, 45], [385, 45], [290, 230]])

# Build the 2x3 matrix from the point pairs above.
M = cv2.getAffineTransform(pts_1, pts_2)
dst_image = cv2.warpAffine(image_points, M, (width, height))

show_with_matplotlib(dst_image, 'Affine transformation')

"""
    5-. Transformacion de perspectiva.
        - Usamos cv2.getPerspectiveTransform por que necesitamos 4 pares de
            puntos (coordenadas de un cuadrangulo en la imagen de origen y salida).
"""
# Cramos una copia de la imagen donde se mostraran los puntos.
image_points = image.copy()
cv2.circle(image_points, (450, 65), 5, (255, 0, 255), -1)
cv2.circle(image_points, (517, 65), 5, (255, 0, 255), -1)
cv2.circle(image_points, (431, 164), 5, (255, 0, 255), -1)
cv2.circle(image_points, (552, 164), 5, (255, 0, 255), -1)
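The excerpt cuts off before the transform itself; a sketch of the usual continuation, pairing the four circled source points with an assumed destination quad (the output size and corner choice are assumptions):

pts_src = np.float32([[450, 65], [517, 65], [431, 164], [552, 164]])
pts_dst = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])  # assumed output quad
M = cv2.getPerspectiveTransform(pts_src, pts_dst)
dst_image = cv2.warpPerspective(image_points, M, (300, 300))
show_with_matplotlib(dst_image, 'Perspective transformation')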
Code example #53
                eyes_centers, eyes_means[0], eyes_stds * 1.96)

            most_possible_right_eye, valid_right_eye = valid_object_exist(
                eyes_centers, eyes_means[1], eyes_stds * 1.96)

            most_possible_nose, valid_nose = valid_object_exist(
                noses_centers, nose_means, nose_stds * 1.96)

            # if the facial objects are all valid, transform the face and try to save / identify the person
            if valid_nose & valid_right_eye & valid_left_eye:
                current_pts = np.float32(
                    np.vstack((most_possible_left_eye, most_possible_right_eye,
                               most_possible_nose)))

                # set up the affine transformation
                affine_transformation = cv2.getAffineTransform(
                    current_pts, ideal_pts)

                # transform the face
                transformed_face = cv2.warpAffine(face, affine_transformation,
                                                  (64, 64))

                # if the program is run with an extra parameter specifying the person's name,
                # then save some images of the person
                if len(sys.argv) > 1:
                    f = 'img/' + str(sys.argv[1]) + '_' + str(int(
                        time.time())) + '.png'
                    cv2.imwrite(f, transformed_face)

                # extract features from the transformed face image using the tuned extractor loaded
                feature = extractor.describe(transformed_face)
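valid_object_exist() is not shown in this excerpt; a plausible sketch, assuming it returns the first candidate center inside mean +/- tolerance on both axes (the 1.96 * std tolerance above suggests a ~95% normal interval; the body is an assumption):

import numpy as np

def valid_object_exist(centers, mean, tolerance):
    for center in centers:
        if np.all(np.abs(np.asarray(center) - mean) <= tolerance):
            return center, True
    return None, False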
Code example #54
def ImgAffine(img, pts1, pts2):
    rows, cols = img.shape[:2]  # works for grayscale and color images alike
    Affinematrix = cv2.getAffineTransform(pts1, pts2)
    affine = cv2.warpAffine(img, Affinematrix, (cols, rows))
    return affine
Code example #55
def img_Transform(car_rect, image):  # takes the rotated min-area rect of the plate mask and the original image
    img_h, img_w = image.shape[:2]
    rect_w, rect_h = car_rect[1][0], car_rect[1][1]
    angle = car_rect[2]

    return_flag = False
    if car_rect[2] == 0:  # rotation angle is 0
        return_flag = True
    if car_rect[2] == -90 and rect_w < rect_h:  # angle is -90 and the rect's width < height
        rect_w, rect_h = rect_h, rect_w
        return_flag = True
    if return_flag:
        car_img = image[int(car_rect[0][1] - rect_h / 2):int(car_rect[0][1] +
                                                             rect_h / 2),
                        int(car_rect[0][0] - rect_w / 2):int(car_rect[0][0] +
                                                             rect_w / 2)]
        return car_img

    car_rect = (car_rect[0], (rect_w, rect_h), angle)
    box = cv2.boxPoints(car_rect)  # the four corner points of the rotated rect

    height_point = right_point = [0, 0]
    left_point = low_point = [car_rect[0][0], car_rect[0][1]]  # rect center (x, y)
    for point in box:
        if left_point[0] > point[0]:
            left_point = point
        if low_point[1] > point[1]:
            low_point = point
        if height_point[1] < point[1]:
            height_point = point
        if right_point[0] < point[0]:
            right_point = point

    if left_point[1] <= right_point[1]:  # positive angle
        new_right_point = [right_point[0], height_point[1]]
        pts1 = np.float32([left_point, height_point, right_point])
        pts2 = np.float32([left_point, height_point,
                           new_right_point])  # only the character height needs to change
        M = cv2.getAffineTransform(pts1, pts2)
        print('Mat1', M)
        print('pts1_1', pts1)
        print('pts1_2', pts2)
        '''
        An affine transform warps the image within the 2D plane: lines that are
        parallel before the transform stay parallel afterwards (picture a rectangle
        being mapped to a parallelogram).
        M = cv2.getAffineTransform(pos1, pos2) takes the corresponding positions
        before and after the transform and returns the 2x3 affine matrix M,
        which is then passed to cv2.warpAffine.
        '''
        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))
        '''
        cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst
                       dsize is the output image size;
                       flags selects the interpolation, default cv2.INTER_LINEAR (bilinear);
                       alternatives: cv2.INTER_NEAREST (nearest neighbour), cv2.INTER_AREA
                       (area resampling), cv2.INTER_CUBIC (bicubic), cv2.INTER_LANCZOS4 (Lanczos);
                       borderMode sets the border pixel mode;
                       borderValue is the border fill value, 0 by default.
        round() returns a number rounded to the nearest integer; round(x, n) keeps n decimal places.
        '''
        car_img = dst[int(left_point[1]):int(height_point[1]),
                      int(left_point[0]):int(new_right_point[0])]

    elif left_point[1] > right_point[1]:  # negative angle
        new_left_point = [left_point[0], height_point[1]]
        pts1 = np.float32([left_point, height_point, right_point])
        pts2 = np.float32([new_left_point, height_point,
                           right_point])  # only the character height needs to change
        print('pts2_1', pts1)
        print('pts2_2', pts2)
        M = cv2.getAffineTransform(pts1, pts2)
        print('Mat2', M)
        dst = cv2.warpAffine(image, M, (round(img_w * 2), round(img_h * 2)))
        car_img = dst[int(right_point[1]):int(height_point[1]),
                      int(new_left_point[0]):int(right_point[0])]

    return car_img
Code example #56
import cv2 as cv
import numpy as np
import os

image_dir = "../622data/test/businessRuleTask/"

name = os.listdir(image_dir)[0]
image_path = image_dir + name

src = cv.imread(image_path)

srcTri = np.array([[0, 0], [src.shape[1] - 1, 0],
                   [0, src.shape[0] - 1]]).astype(np.float32)
dstTri = np.array([[0, src.shape[1] * 0.33],
                   [src.shape[1] * 0.85, src.shape[0] * 0.25],
                   [src.shape[1] * 0.15,
                    src.shape[0] * 0.7]]).astype(np.float32)
warp_mat = cv.getAffineTransform(srcTri, dstTri)
warp_dst = cv.warpAffine(src, warp_mat, (src.shape[1], src.shape[0]))
# Rotating the image after the warp
center = (warp_dst.shape[1] // 2, warp_dst.shape[0] // 2)
angle = -50
scale = 0.6
rot_mat = cv.getRotationMatrix2D(center, angle, scale)
warp_rotate_dst = cv.warpAffine(warp_dst, rot_mat,
                                (warp_dst.shape[1], warp_dst.shape[0]))
cv.imshow('Source image', src)
cv.imshow('Warp', warp_dst)
cv.imshow('Warp + Rotate', warp_rotate_dst)
cv.waitKey()
Code example #57
# Affine transformation of an image
import cv2
import numpy as np

img = cv2.imread("girl.jpg", 1)
# cv2.imshow("img", img)
imgInfo = img.shape
height = imgInfo[0]
width = imgInfo[1]
print(imgInfo)
# src 3 -> dst 3: map three points of the source image (top-left, bottom-left, top-right) to three points in the new image
# three points in the source image
matSrc = np.float32([[0, 0], [0, height - 1], [width - 1, 0]])
# their positions in the new image
matDst = np.float32([[50, 50], [200, height - 100], [width - 200, 100]])

matAffine = cv2.getAffineTransform(matSrc, matDst)
dst = cv2.warpAffine(img, matAffine, (width, height))

cv2.imshow("affine", dst)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code example #58
def align(img_path):
    '''
    Aligns the user image with the painting image using manual alignment
    and facial affine transforms.
    params: img_path - path to the style image
    '''

    filename1 = img_path
    predictor_path = "shape_predictor_68_face_landmarks.dat"
    global detector, predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)

    img1 = cv2.imread(filename1)
    box = findBiggestFace(img1)
    points, shape = findLandmarks(img1, box)

    img_cropped = cv2.resize(
        img1[box.top() - 50:box.bottom() + 50,
             box.left() - 50:box.right() + 50], (500, 500))
    #img_cropped = cv2.resize(img1, (500,500))

    box1 = findBiggestFace(img_cropped)
    points1, shape1 = findLandmarks(img_cropped, box1)
    TEMPLATE = np.float32(shape1)

    facialPoints = [41, 8, 47]

    imgDim = 500

    cap = cv2.VideoCapture(0)
    i = 0

    while (True):
        text_box = 255 * np.ones((80, 400, 3))

        ret, frame = cap.read()

        try:
            Img = frame

            if i == 0:
                box_v = findBiggestFace(Img)
                i = 1
            points_v, shape_v = findLandmarks(Img, box_v)

            lol = cv2.resize(
                Img[box_v.top() - 100:box_v.bottom() + 100,
                    box_v.left() - 100:box_v.right() + 100], (500, 500))
            img_cropped_v = cv2.resize(
                Img[box_v.top() - 50:box_v.bottom() + 50,
                    box_v.left() - 50:box_v.right() + 50], (500, 500))
            img_cropped_v = cv2.resize(Img, (500, 500))
            box_v1 = findBiggestFace(img_cropped_v)
            points1_v, shape1_v = findLandmarks(img_cropped_v, box_v1)

            ###PAINTING NOSE###
            for k in range(27, 36):
                cv2.circle(img_cropped_v,
                           (int(shape1[k][0]), int(shape1[k][1])), 2,
                           (255, 0, 0), -1)

            ##################################################
            ############CONDITIONS FOR MOVEMENT ##############
            ##################################################
            font = cv2.FONT_HERSHEY_SIMPLEX

            ############################
            ####HORIZONTAL MOVEMENT#####
            ############################

            if shape1_v[27][0] - shape1[27][0] > 2 and shape1_v[28][
                    0] - shape1[28][0] > 2 and shape1_v[29][0] - shape1[29][
                        0] > 2 and shape1_v[30][0] - shape1[30][0] > 2:

                font = cv2.FONT_HERSHEY_SIMPLEX
                #### SUBJECTS NOSE###

                for k in range(27, 31):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 0, 255), -1)

                flag1 = 1

            elif shape1_v[27][0] - shape1[27][0] < -2 and shape1_v[28][
                    0] - shape1[28][0] < -2 and shape1_v[29][0] - shape1[29][
                        0] < -2 and shape1_v[30][0] - shape1[30][0] < -2:

                #### SUBJECTS NOSE###
                for k in range(27, 31):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 0, 255), -1)

                flag1 = 2

            else:

                #### SUBJECTS NOSE###

                for k in range(27, 31):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 255, 0), -1)

                flag1 = 0

            ###########################################
            ##########VERTICAL MOVEMENT ###############
            ###########################################

            if shape1_v[31][1] - shape1[31][1] > 2 and shape1_v[32][
                    1] - shape1[32][1] > 2 and shape1_v[33][1] - shape1[33][
                        1] > 2 and shape1_v[34][1] - shape1[34][
                            1] > 2 and shape1_v[35][1] - shape1[35][1] > 2:
                font = cv2.FONT_HERSHEY_SIMPLEX

                #### SUBJECTS NOSE###

                for k in range(31, 36):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 0, 255), -1)

                flag2 = 1

            elif shape1_v[31][1] - shape1[31][1] < -2 and shape1_v[32][
                    1] - shape1[32][1] < -2 and shape1_v[33][1] - shape1[33][
                        1] < -2 and shape1_v[34][1] - shape1[34][
                            1] < -2 and shape1_v[35][1] - shape1[35][1] < -2:

                #### SUBJECTS NOSE###
                for k in range(31, 36):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 0, 255), -1)

                flag2 = 2

            else:

                #### SUBJECTS NOSE###
                for k in range(31, 36):
                    cv2.circle(img_cropped_v,
                               (int(shape1_v[k][0]), int(shape1_v[k][1])), 2,
                               (0, 255, 0), -1)

                flag2 = 0

            if flag1 == 1 and flag2 == 1:
                cv2.putText(text_box, "Look right and up", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 1 and flag2 == 2:
                cv2.putText(text_box, "Look right and down", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 1 and flag2 == 0:
                cv2.putText(text_box, "Look right", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 2 and flag2 == 1:
                cv2.putText(text_box, "Look left and up", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 2 and flag2 == 2:
                cv2.putText(text_box, "Look left and down", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 2 and flag2 == 0:
                cv2.putText(text_box, "Look left", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 0 and flag2 == 1:
                cv2.putText(text_box, "Look up", (5, 40), font, 1, (0, 0, 255),
                            1, cv2.LINE_AA)
            elif flag1 == 0 and flag2 == 2:
                cv2.putText(text_box, "Look down", (5, 40), font, 1,
                            (0, 0, 255), 1, cv2.LINE_AA)
            elif flag1 == 0 and flag2 == 0:
                cv2.putText(text_box, "Hold pose", (5, 40), font, 1,
                            (0, 255, 0), 1, cv2.LINE_AA)

            cv2.imshow('1', img_cropped_v)
            cv2.imshow('text', text_box)
            cv2.imshow('2', img_cropped)

        except Exception as e:
            pass

        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.imwrite('img.jpg', lol)
            #cv2.imwrite('unaligned.jpg', img_cropped_v)
            break

    cap.release()
    cv2.destroyAllWindows()

    ############################################
    ############# WARPING IMAGE ################
    ############################################

    facialPoints = [0, 8, 16]
    facialPoints2 = [36, 45, 57]

    img1 = cv2.resize(cv2.imread(filename1), (500, 500))
    #img1 = cv2.resize(cv2.imread('img.jpg'), (500,500))

    box = findBiggestFace(img1)
    _, TEMPLATE = findLandmarks(img1, box)
    TEMPLATE = np.float32(TEMPLATE)

    TEMPLATE = TEMPLATE / 500

    filename2 = 'img.jpg'

    imgDim = 500

    Img = cv2.resize(cv2.imread(filename2), (500, 500))

    box = findBiggestFace(Img)

    _, landmarks = findLandmarks(Img, box)

    npLandmarks = np.float32(landmarks)
    npfacialPoints = np.array(facialPoints)

    H = cv2.getAffineTransform(npLandmarks[npfacialPoints],
                               500 * TEMPLATE[npfacialPoints])

    thumbnail = cv2.warpAffine(Img, H, (imgDim, imgDim))

    #######################
    ### Second warping ####
    #######################

    Img = thumbnail

    box = findBiggestFace(Img)

    _, landmarks = findLandmarks(Img, box)

    npLandmarks = np.float32(landmarks)
    npfacialPoints = np.array(facialPoints2)

    H1 = cv2.getAffineTransform(npLandmarks[npfacialPoints],
                                500 * TEMPLATE[npfacialPoints])

    thumbnail2 = cv2.warpAffine(Img, H1, (imgDim, imgDim))

    return img1, lol, thumbnail2
Code example #59
        rect2 = cv2.boundingRect(triangle2)
        (x, y, w, h) = rect2

        cropped_tr2_mask = np.zeros((h, w), np.uint8)

        points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],
                            [tr2_pt2[0] - x, tr2_pt2[1] - y],
                            [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)

        cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)

        # Warp triangles
        points = np.float32(points)
        points2 = np.float32(points2)
        M = cv2.getAffineTransform(points, points2)
        warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
        warped_triangle = cv2.bitwise_and(warped_triangle,
                                          warped_triangle,
                                          mask=cropped_tr2_mask)

        # Reconstructing destination face
        img2_new_face_rect_area = img2_new_face[y:y + h, x:x + w]
        img2_new_face_rect_area_gray = cv2.cvtColor(img2_new_face_rect_area,
                                                    cv2.COLOR_BGR2GRAY)
        _, mask_triangles_designed = cv2.threshold(
            img2_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
        warped_triangle = cv2.bitwise_and(warped_triangle,
                                          warped_triangle,
                                          mask=mask_triangles_designed)
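This excerpt starts mid-loop; by symmetry with the destination-triangle handling above, the source side presumably prepares cropped_triangle and points the same way. A sketch (only the variable roles mirror the excerpt, the rest is an assumption):

import cv2
import numpy as np

def crop_triangle(img, triangle):
    # triangle: 3x2 int32 array of landmark coordinates
    x, y, w, h = cv2.boundingRect(triangle)
    cropped = img[y:y + h, x:x + w]
    # triangle coordinates relative to the crop, as fed to cv2.getAffineTransform
    local_pts = np.array([[p[0] - x, p[1] - y] for p in triangle], np.int32)
    return cropped, local_pts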
Code example #60
def elastic_transform(im, alpha=0.5, sigma=0.2, affine_sigma=1.):
    """
    Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
    elastic deformation of images as described in [Simard2003]
    """
    # fixme : not implemented for multi channel !
    import cv2

    islist = isinstance(im, (tuple, list))
    ima = im[0] if islist else im

    # image shape
    shape = ima.shape
    shape_size = shape[:2]

    # Random affine transform
    center_square = np.float32(shape_size) // 2
    square_size = min(shape_size) // 3
    pts1 = np.float32([
        center_square + square_size,
        [center_square[0] + square_size, center_square[1] - square_size],
        center_square - square_size
    ])
    pts2 = pts1 + np.random.uniform(
        -affine_sigma, affine_sigma, size=pts1.shape).astype(np.float32)
    M = cv2.getAffineTransform(pts1, pts2)
    if islist:
        res = []
        for i, ima in enumerate(im):
            if i == 0:
                res.append(
                    cv2.warpAffine(ima,
                                   M,
                                   shape_size[::-1],
                                   borderMode=cv2.BORDER_REFLECT_101))
            else:
                res.append(cv2.warpAffine(ima, M, shape_size[::-1]))
        im = res
    else:
        ima = cv2.warpAffine(ima,
                             M,
                             shape_size[::-1],
                             borderMode=cv2.BORDER_REFLECT_101)
        # ima = cv2.warpAffine(ima, M, shape_size[::-1])

    # fast gaussian filter
    blur_size = int(4 * sigma) | 1
    dx = cv2.GaussianBlur((np.random.rand(*shape) * 2 - 1),
                          ksize=(blur_size, blur_size),
                          sigmaX=sigma) * alpha
    dy = cv2.GaussianBlur((np.random.rand(*shape) * 2 - 1),
                          ksize=(blur_size, blur_size),
                          sigmaX=sigma) * alpha

    # remap: note that map_x holds row (y) coordinates and map_y holds
    # column (x) coordinates, so the call below passes them as (map_y, map_x)
    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    map_x, map_y = (y + dy).astype('float32'), (x + dx).astype('float32')

    def remap(data):
        r = cv2.remap(data,
                      map_y,
                      map_x,
                      interpolation=cv2.INTER_LINEAR,
                      borderMode=cv2.BORDER_REFLECT_101)
        return r[..., np.newaxis]

    if islist:
        return tuple([remap(ima) for ima in im])
    else:
        return remap(ima)
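A usage sketch for elastic_transform() on a single-channel float32 image; the function warps one channel and returns shape (h, w, 1):

import numpy as np

im = np.random.rand(64, 64).astype(np.float32)  # dummy grayscale image
warped = elastic_transform(im, alpha=2.0, sigma=0.5, affine_sigma=2.0)
print(warped.shape)  # (64, 64, 1)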