Example #1
def test_similarity_estimation():
    # exact solution
    tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_almost_equal(tform(SRC[:2, :]), DST[:2, :])
    assert_equal(tform.params[0, 0], tform.params[1, 1])
    assert_equal(tform.params[0, 1], -tform.params[1, 0])

    # over-determined
    tform2 = estimate_transform('similarity', SRC, DST)
    assert_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_equal(tform2.params[0, 0], tform2.params[1, 1])
    assert_equal(tform2.params[0, 1], -tform2.params[1, 0])

    # via estimate method
    tform3 = SimilarityTransform()
    tform3.estimate(SRC, DST)
    assert_almost_equal(tform3.params, tform2.params)
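
SRC and DST are module-level fixtures that the excerpt does not show. A minimal sketch (assumed definitions, not the originals) under which every assertion above holds:

import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from skimage.transform import SimilarityTransform, estimate_transform

# Hypothetical fixtures: DST is SRC pushed through a known similarity, so the
# two-point fit is exact and the over-determined fit is consistent.
SRC = np.array([[0.0, 0.0], [0.0, 10.0], [10.0, 10.0], [10.0, 0.0]])
DST = SimilarityTransform(scale=1.5, rotation=0.3, translation=(2, 3))(SRC)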
Example #2
def alignment(src_img, landmarks):
    ref_pts = [
        REF_LEFT_EYE, REF_RIGHT_EYE, REF_NOSE, REF_LEFT_MOUTH_CORNER,
        REF_RIGHT_MOUTH_CORNER
    ]
    crop_size = (TARGET_IMG_WIDTH, TARGET_IMG_HEIGHT)

    s = np.array(ref_pts).astype(np.float32)
    r = np.array(landmarks).astype(np.float32)

    tfm = SimilarityTransform()
    tfm.estimate(r, s)
    M = tfm.params[0:2, :]

    face_img = cv2.warpAffine(src_img, M, crop_size)

    return face_img
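
The REF_* and TARGET_IMG_* constants are defined elsewhere in that project. Plausible values (assumptions here, borrowed from the ArcFace 112x112 template that also appears in Example #18):

import numpy as np

TARGET_IMG_WIDTH, TARGET_IMG_HEIGHT = 112, 112
REF_LEFT_EYE = [38.2946, 51.6963]
REF_RIGHT_EYE = [73.5318, 51.5014]
REF_NOSE = [56.0252, 71.7366]
REF_LEFT_MOUTH_CORNER = [41.5493, 92.3655]
REF_RIGHT_MOUTH_CORNER = [70.7299, 92.2041]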
Example #3
File: calib.py Project: nbhr/pycalib
def absolute_orientation(p, q, no_scaling=False):
    """
    Returns R, t, s satisfying q = s * R * p + t
    
    p and q must be 3xN matrices.
    """

    if no_scaling:
        st = EuclideanTransform()
    else:
        st = SimilarityTransform()

    st.estimate(p.T, q.T)
    R = st.params[:3, :3]
    t = st.params[:3, 3]
    # The estimated linear block is s*R; since ||s*R||_F = s*sqrt(3) for a
    # 3D rotation R, the scale factors out via the Frobenius norm.
    s = np.linalg.norm(R) / np.sqrt(3)
    R = R / s
    return R, t, s
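
A quick round-trip check (a sketch; assumes SciPy for a random rotation and a scikit-image version whose SimilarityTransform supports 3D estimation):

import numpy as np
from scipy.spatial.transform import Rotation

rng = np.random.default_rng(0)
p = rng.normal(size=(3, 10))                        # 3xN source points
R_true = Rotation.random(random_state=0).as_matrix()
s_true, t_true = 2.0, np.array([1.0, -2.0, 0.5])
q = s_true * (R_true @ p) + t_true[:, None]         # q = s * R * p + t

R, t, s = absolute_orientation(p, q)
assert np.allclose(R, R_true) and np.allclose(t, t_true) and np.isclose(s, s_true)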
Example #4
 def reshape_stim(self, stim):
     if isinstance(stim, (ImageStimulus, VideoStimulus)):
         # In the more general case, the implant might not have a 'shape'
         # attribute or have a hex grid. Idea:
         # - Fit electrode locations to a rectangular grid
         # - Downscale the image to that grid size
         # - Index into grid to determine electrode activation
         data = stim.rgb2gray()
         if hasattr(self.earray, 'rot'):
             # We need to rotate the array & image first, otherwise we may
             # end up with an infinitesimally small (dx,dy); for example,
             # when a rectangular grid is rotated by 1deg:
             tf = SimilarityTransform(rotation=np.deg2rad(self.earray.rot))
             x, y = np.array([tf.inverse([e.x, e.y])
                              for e in self.electrode_objects]).squeeze().T
             data = data.rotate(self.earray.rot)
         else:
             x = [e.x for e in self.electrode_objects]
             y = [e.y for e in self.electrode_objects]
         # Determine grid step by finding the greatest common divisor:
         dx = abs(reduce(lambda a, b: c_gcd(a, b), np.diff(x)))
         dy = abs(reduce(lambda a, b: c_gcd(a, b), np.diff(y)))
         # Build a new rectangular grid:
         try:
             grid = Grid2D((np.min(x), np.max(x)), (np.min(y), np.max(y)),
                           step=(dx, dy))
         except MemoryError:
             raise ValueError("Automatic stimulus reshaping failed. You "
                              "will need to resize the stimulus yourself "
                              "so that there is one activation value per "
                              "electrode.")
         # For each electrode, find the closest pixel on the grid:
         kdtree = cKDTree(np.vstack((grid.x.ravel(), grid.y.ravel())).T)
         _, e_idx = kdtree.query(np.vstack((x, y)).T)
         data = data.resize(grid.x.shape).data[e_idx, ...].squeeze()
         # Sample the stimulus at the correct pixel locations:
         return Stimulus(data, electrodes=self.electrode_names,
                         time=stim.time, metadata=stim.metadata)
     else:
         if len(stim.electrodes) != self.n_electrodes:
             err_str = (f"Number of electrodes in the stimulus "
                        f"({len(stim.electrodes)}) does not match the number "
                        f"of electrodes in the implant ({self.n_electrodes}).")
             raise ValueError(err_str)
     return stim
Example #5
def compute_transformation_matrix(img, landmark, normalize, target_face_scale=1.0):

    std_pts = _standard_face_pts()  # [-1,1]
    target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0

    h, w, c = img.shape
    if normalize:
        landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
        landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0

    affine = SimilarityTransform()

    affine.estimate(target_pts, landmark)

    return affine.params
Example #6
import numpy as np
from PIL import Image
from skimage.transform import SimilarityTransform, warp


def warp_images(image0, image1, transform):
    r, c = image1.shape[:2]
    # Note that transformations take coordinates in (x, y) format,
    # not (row, column), in order to be consistent with most literature
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])

    # Warp the image corners to their new positions
    warped_corners = transform(corners)

    # Find the extents of both the reference image and the warped
    # target image
    all_corners = np.vstack((warped_corners, corners))

    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)

    output_shape = (corner_max - corner_min)
    output_shape = np.ceil(output_shape[::-1])

    offset = SimilarityTransform(translation=-corner_min)

    image0_ = warp(image0, offset.inverse, output_shape=output_shape, cval=-1)

    image1_ = warp(image1, (transform + offset).inverse,
                   output_shape=output_shape,
                   cval=-1)

    image0_zeros = warp(image0,
                        offset.inverse,
                        output_shape=output_shape,
                        cval=0)

    image1_zeros = warp(image1, (transform + offset).inverse,
                        output_shape=output_shape,
                        cval=0)

    overlap = (image0_ != -1.0).astype(int) + (image1_ != -1.0).astype(int)
    overlap += (overlap < 1).astype(int)
    merged = (image0_zeros + image1_zeros) / overlap

    im = Image.fromarray((merged).astype('uint8'))
    im.save('stitched_images.jpg')
    im.show()
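
A minimal driver (a sketch, not from the original script; uses the imports above): stitch an image with a shifted copy of itself. The images are passed as 0..255 floats so the final astype('uint8') stays meaningful, since warp rescales integer inputs to [0, 1]:

from skimage import data

image0 = data.camera().astype(float)               # 0..255 float image
shift = SimilarityTransform(translation=(-64, 0))
image1 = warp(image0, shift)                       # copy shifted 64 px right
warp_images(image0, image1, shift)                 # shift maps image1 -> image0 coords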
Example #7
import numpy as np
from skimage.transform import SimilarityTransform, resize, warp


def make_mnist(img):
    # height - number of rows, width - number of columns
    height, width = img.shape
    padding = 450

    # Add padding around the image
    tmp = np.zeros((height + 2 * padding, width + 2 * padding)).astype(int)
    tmp[padding:padding + height, padding:padding + width] = img

    # Computing the bounding box
    nonzY, nonzX = np.where(tmp)
    min_y, min_x = nonzY.min(), nonzX.min()
    max_y, max_x = nonzY.max(), nonzX.max()

    boundingBoxWidth = max_x - min_x
    boundingBoxHeight = max_y - min_y

    # Make the box square so the digit keeps its aspect ratio when resized
    if boundingBoxWidth < boundingBoxHeight:
        max_x = min_x + boundingBoxHeight

    if boundingBoxWidth > boundingBoxHeight:
        max_y = min_y + boundingBoxWidth

    img = resize(tmp[min_y:max_y, min_x:max_x].astype(float), (20, 20))

    # Now inserting the 20x20 image
    tmp = np.zeros((28, 28))
    tmp[0:20, 0:20] = img

    # Calculating translation

    Y, X = np.where(tmp)
    height, width = tmp.shape

    tsy, tsx = np.round(height / 2 - Y.mean()), np.round(width / 2 - X.mean())

    # Moving the digit
    tf = SimilarityTransform(translation=(-tsx, -tsy))
    tmp = warp(tmp, tf)
    return np.round(tmp).astype(int)
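
A smoke test with a synthetic blob (a sketch; uses the imports above):

digit = np.zeros((100, 60), dtype=int)
digit[20:80, 15:45] = 255
out = make_mnist(digit)
assert out.shape == (28, 28)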
Example #8
    def transform_for_clip(self,
                           video_id,
                           dst_w=720,
                           dst_h=360,
                           points_random_shift=0):
        points = self.ruler_points[video_id]

        ruler_points = np.array([[points.x1, points.y1],
                                 [points.x2, points.y2]])
        img_points = np.array([[dst_w * 0.1, dst_h / 2],
                               [dst_w * 0.9, dst_h / 2]])

        if points_random_shift > 0:
            img_points += np.random.uniform(-points_random_shift,
                                            points_random_shift, (2, 2))

        tform = SimilarityTransform()
        # src = crop coords, dst = frame coords: the estimated transform maps
        # crop coordinates into the original frame, i.e. a ready-made inverse
        # map for skimage's warp.
        tform.estimate(dst=ruler_points, src=img_points)

        return tform
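
A plausible call site (dataset, frame and the video id are assumed names). Because the estimated transform maps crop coordinates into the original frame, it can be handed to skimage's warp directly as the inverse map:

from skimage.transform import warp

tform = dataset.transform_for_clip('video_001', dst_w=720, dst_h=360)
crop = warp(frame, tform, output_shape=(360, 720))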
Example #9
def get_final_center_warps(image_collection, simple_center_warps):
    """Find final transformations.

    image_collection (Tuple[N]): list of all images
    simple_center_warps (Tuple[N]): transformations unadjusted for shift

    Returns:
        Tuple[N]: final transformations
        shape: output canvas shape (height, width)
    """
    corners = tuple(get_corners(image_collection, simple_center_warps))
    bound = get_min_max_coords(corners)
    min_y = bound[0][0]
    min_x = bound[0][1]
    # The same shift applies to every warp: move the minimum corner to the origin
    shift = SimilarityTransform(translation=(-min_x, -min_y))
    for warp in simple_center_warps:
        warp.params = np.matmul(shift.params, warp.params)
    shape = np.array([bound[1][1] - bound[0][1], bound[1][0] - bound[0][0]],
                     dtype=int)
    return simple_center_warps, shape
Example #10
def compute_transformation_matrix(img,
                                  landmark,
                                  normalize,
                                  target_face_scale=1.0,
                                  inverse=False):
    std_pts = _standard_face_pts()  # [-1,1]
    target_pts = (std_pts * target_face_scale + 1) / 2 * 256.0

    h, w, c = img.shape
    if normalize:
        landmark[:, 0] = landmark[:, 0] / h * 2 - 1.0
        landmark[:, 1] = landmark[:, 1] / w * 2 - 1.0

    affine = SimilarityTransform()
    if inverse:
        affine.estimate(landmark, target_pts)
    else:
        affine.estimate(target_pts, landmark)

    return affine
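
A plausible call site (names assumed; normalize=False keeps the landmarks in pixel coordinates). The estimated transform maps template coordinates to image coordinates, which is exactly warp's inverse-map convention:

from skimage.transform import warp

affine = compute_transformation_matrix(img, landmark, normalize=False)
aligned_face = warp(img, affine, output_shape=(256, 256))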
Example #11
def test_union():
    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1**2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_almost_equal(tform.params, tform3.params)

    tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1**2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_almost_equal(tform.params, tform3.params)
    assert tform.__class__ == ProjectiveTransform

    tform = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    assert_almost_equal((tform + tform.inverse).params, np.eye(3))

    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 * 1 / 0.1, rotation=0.3 - 0.9)
    tform = tform1 + tform2.inverse
    assert_almost_equal(tform.params, tform3.params)
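
These unions rely on skimage's composition order: tform1 + tform2 applies tform1 first. A tiny check (a sketch):

import numpy as np
from skimage.transform import SimilarityTransform

t1 = SimilarityTransform(scale=2)
t2 = SimilarityTransform(translation=(1, 0))
pt = np.array([[1.0, 1.0]])
assert np.allclose((t1 + t2)(pt), t2(t1(pt)))  # scale first, then shift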
Example #12
    def compute_transformation(points: np.ndarray,
                               reference: np.ndarray) -> SimilarityTransform:
        """Obtain a transformation for aligning key points to
        reference positions.

        Arguments
        ---------
        points:
            A sequence of points to be mapped onto the reference points,
            given as (x,y) coordinates
        reference:
            A sequence with the same number of points serving as reference
            points to which `points` should be moved.

        """
        transformation = SimilarityTransform()
        transformation.estimate(reference, points)
        # transform.params: 3x3 matrix, projective coordinates,
        # last row [0,0,1]
        return transformation
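
Sketch of a call site (all names assumed): because the transform was estimated from reference to points, it maps aligned output coordinates back to image coordinates and can serve directly as warp's inverse map:

from skimage.transform import warp

tf = compute_transformation(detected_points, reference_points)
aligned = warp(face_image, tf, output_shape=(112, 112))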
Example #13
def shift_image(img, shift_cols, shift_rows):
    """Shift the image foreground

    This function shifts the center of mass (CoM) of the image by the
    specified number of rows and columns.
    The background of the image is assumed to be black (0 grayscale).

    .. versionadded:: 0.7

    Parameters
    ----------
    img : ndarray
        A 2D NumPy array representing a (height, width) grayscale image, or a
        3D NumPy array representing a (height, width, channels) RGB image
    shift_cols : float
        Number of columns by which to shift the CoM.
        Positive: to the right, negative: to the left
    shift_rows : float
        Number of rows by which to shift the CoM.
        Positive: downward, negative: upward

    Returns
    -------
    img : ndarray
        A copy of the shifted image

    """
    if img.ndim < 2 or img.ndim > 3:
        raise ValueError("Only 2D and 3D images are allowed, not "
                         "%dD." % img.ndim)
    tf = SimilarityTransform(translation=[shift_cols, shift_rows])
    img_warped = warp(img, tf.inverse)
    # Warp automatically converts to double, so we need to convert the image
    # back to its original format:
    if img.dtype == bool:
        return img_as_bool(img_warped)
    if img.dtype == np.uint8:
        return img_as_ubyte(img_warped)
    if img.dtype == np.float32:
        return img_as_float32(img_warped)
    return img_warped
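
A toy check of the shift direction (a sketch):

import numpy as np

img = np.zeros((5, 5))
img[1, 1] = 1.0
shifted = shift_image(img, shift_cols=2, shift_rows=1)
assert np.isclose(shifted[2, 3], 1.0)  # moved 1 row down, 2 columns right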
Example #14
    def test_register_npma(self):
        from skimage.transform import SimilarityTransform
        transf = SimilarityTransform(rotation=np.pi/2., translation=(1, 0))
        nparr = np.array([[0., 1.], [2., 3.]])
        mask = [[True, False], [False, False]]

        ma = np.ma.array(nparr, mask=mask)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, True], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))

        ma = np.ma.array(nparr)
        registered_img, footp = aa.apply_transform(
            transf, ma, ma, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, False], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))
Example #15
    def test_register_nddata(self):
        from astropy.nddata import NDData
        from skimage.transform import SimilarityTransform
        transf = SimilarityTransform(rotation=np.pi/2., translation=(1, 0))

        nd = NDData([[0., 1.], [2., 3.]], mask=[[True, False], [False, False]])
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, True], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))

        # Test now if there is no assigned mask during creation
        nd = NDData([[0., 1.], [2., 3.]])
        registered_img, footp = aa.apply_transform(
            transf, nd, nd, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, False], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))
Example #16
def generate_init_tfs(pairs, n_slcs, n_tiles):
    """Find the transformation of each tile to tile[0,0]."""
    tf0 = SimilarityTransform()
    init_tfs = np.empty([n_slcs, n_tiles, 3])
    for pair in pairs:
        p, _, _, model, _ = pair
        # referenced to tile 0 within the same slice
        if (p[0][1] == 0) and (p[0][0] == p[1][0]):
            tf1 = tf0 + model
            itf = [
                math.acos(min(tf1.params[0, 0], 1)),  # FIXME!!! with RigidTransform
                tf1.params[0, 2],
                tf1.params[1, 2]
            ]
            init_tfs[p[1][0], p[1][1], :] = np.array(itf)
        # [slcX, tile0] referenced to [slcX-1, tile0]
        if (p[0][1] == p[1][1] == 0) and (p[1][0] - p[0][0] == 1):
            tf0 = tf0 + model

    return init_tfs
Example #17
def get_optical_flow(prevOutput, targetPoints, target_frame, prev_target_frame,
                     frame_no):
    p0 = np.asarray(targetPoints).astype(np.float32)[:, :, None]
    p0 = np.transpose(p0, (0, 2, 1))
    old_gray = cv2.cvtColor(prev_target_frame, cv2.COLOR_BGR2GRAY)
    frame_gray = cv2.cvtColor(target_frame, cv2.COLOR_BGR2GRAY)

    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None,
                                           **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    newOutput = np.copy(prevOutput)

    transform = SimilarityTransform()
    if transform.estimate(good_old, good_new):
        newOutput = transform_image(good_old, good_new, prevOutput,
                                    target_frame, frame_no)
    return newOutput, tuplify(good_new.tolist())
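
lk_params is defined elsewhere in that script. A typical configuration (assumed values, matching the standard OpenCV Lucas-Kanade settings):

import cv2

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                           10, 0.03))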
Example #18
 def get_face(img, dst, target_size=(112, 112)):
     """
     :param img: image
     :param dst:
     :param target_size:
     :return:
     """
     src = np.array(
         [
             [38.2946, 51.6963],
             [73.5318, 51.5014],
             [56.0252, 71.7366],
             [41.5493, 92.3655],
             [70.7299, 92.2041],
         ],
         dtype=np.float32,
     )
     tform = SimilarityTransform()
     tform.estimate(dst, src)
     tmatrix = tform.params[0:2, :]
     return cv2.warpAffine(img, tmatrix, target_size, borderValue=0.0)
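
Example call with hypothetical landmark values in the template's (x, y) order (eyes, nose, mouth corners); img is an image as loaded by OpenCV:

import numpy as np

landmarks = np.array([[66., 73.], [103., 71.], [85., 93.],
                      [70., 113.], [100., 111.]], dtype=np.float32)
face = get_face(img, landmarks)  # 112x112 aligned crop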
Example #19
    def test_register_ccddata(self):
        from ccdproc import CCDData
        from skimage.transform import SimilarityTransform
        transf = SimilarityTransform(rotation=np.pi/2., translation=(1, 0))

        cd = CCDData(
            [[0., 1.], [2., 3.]],
            mask=[[True, False], [False, False]], unit='adu')
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, True], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))

        cd = CCDData([[0., 1.], [2., 3.]], unit='adu')
        registered_img, footp = aa.apply_transform(
            transf, cd, cd, propagate_mask=True)
        err = np.linalg.norm(registered_img - np.array([[2., 0.], [3., 1.]]))
        self.assertLess(err, 1E-6)
        err_mask = (footp == np.array([[False, False], [False, False]]))
        self.assertTrue(all(err_mask.flatten()))
Example #20
    def ImageTransform(self, Img, params, OutImg):
        """
        Function for similarity transform of given input Image
        Input:
            Img: nd-array
            params: vector (length 4) with transformation parameters
                r (scaling factor), angle (rotation angle) t_1 and t_2
                (translation vector)
            OutImg: Target image. Required to know correct dimensions for output image
        Returns:
            nd-Image array
        """

        # get transformation params
        r, alpha, t1, t2 = params

        # Apply params to coordinate matrices
        trafo = SimilarityTransform(matrix=None, scale=r, rotation=alpha,
                                    translation=[t1, t2])

        # warp expects a map from output to input coordinates, hence .inverse
        return warp(Img, trafo.inverse, output_shape=OutImg.shape)
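
A sketch of a call (obj, Img and OutImg are assumed names): scale by 1.1, rotate by 5 degrees, translate by (3, -2):

import numpy as np

params = [1.1, np.deg2rad(5), 3.0, -2.0]  # r, alpha, t_1, t_2
result = obj.ImageTransform(Img, params, OutImg)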
Example #21
def calculate_offset(sentinel2_image, landsat8_image):
    """
    Calculates the offset. Receives single-band images as xarray.DataArray
    objects with only 'y' and 'x' dimensions and coordinates.
    
    Args:
        sentinel2_image (xarray.DataArray): reference image (y,x)
        landsat8_image (xarray.DataArray): secondary image with offset (y,x)
    
    Returns:
        shift ([float,float]): list with (y,x) subpixel offset
    """
    
    ## TO-DO: test if images have the same dimension
    
    image = np.nan_to_num(sentinel2_image)
    offset_image = np.nan_to_num(landsat8_image)
    
    # 1-pixel precision (sub-pixel correction requires an order >= 1 warp
    # with interpolation)
    shift, error, diffphase = register_translation(image, offset_image)
    
    print("Detected pixel offset (y, x): {}".format(shift))
    
    # check correction
    tform = SimilarityTransform(translation=(-shift[1],shift[0])) #inverted y-axis rasters
    warped = xr.apply_ufunc(__warp_clip__,landsat8_image.load(),
                   kwargs={'inverse_map':tform,
                           'dtype_':landsat8_image.dtype,
                           'order':0,
                           'preserve_range':True})
    
    corr_shift, corr_error, corr_diffphase = register_translation(image,
                                                                  warped,
                                                                  100)
    
    print("Sub-pixel offset after correction in reference image (y, x): {}".format(corr_shift))
    
    return shift

Example #22
def apply_random_transformation(background_size,
                                segmented_box,
                                margin=0.1,
                                max_obj_size_in_bg=0.4):
    """apply a random transformation to 2D coordinates nomalized to image size"""
    orig_coords_norm = segmented_box.segmented_coords_homog_norm[:, :2]
    # translate object coordinates to the object center's frame, i.e. whitens
    whitened_coords_norm = orig_coords_norm - (segmented_box.x_center_norm,
                                               segmented_box.y_center_norm)

    # then generate a random rotation around the z-axis (perpendicular to the
    # image plane), and limit the object scale so that the normalized largest
    # dimension of the object is at most max_obj_size_in_bg (0.4 by default).
    # To put it simply, scale the objects down if they're too big.
    # TODO(minhnh) add shear
    max_scale = 1.
    if segmented_box.max_dimension_norm > max_obj_size_in_bg:
        max_scale = max_obj_size_in_bg / segmented_box.max_dimension_norm
    random_rot_angle = np.random.uniform(0, np.pi)
    rand_scale = np.random.uniform(0.5 * max_scale, max_scale)

    # generate a random translation within the image boundaries for whitened, normalized coordinates, taking into
    # account the maximum allowed object dimension. After this translation, the normalized coordinates should
    # stay within [margin, 1-margin] for each dimension
    scaled_max_dimension = segmented_box.max_dimension_norm * max_scale
    low_norm_bound, high_norm_bound = ((scaled_max_dimension / 2) + margin,
                                       1 - margin - (scaled_max_dimension / 2))
    random_translation_x = np.random.uniform(low_norm_bound, high_norm_bound)
    random_translation_y = np.random.uniform(low_norm_bound, high_norm_bound)

    # create the transformation matrix for the generated rotation, translation and scale
    tf_matrix = SimilarityTransform(rotation=random_rot_angle,
                                    scale=rand_scale,
                                    translation=(random_translation_x,
                                                 random_translation_y)).params

    # apply transformation
    transformed_coords_norm = matrix_transform(whitened_coords_norm, tf_matrix)
    return transformed_coords_norm
Example #23
File: test.py Project: gbrammer/tristars
def generate_catalogs(seed=1, N1=50, N2=10, tr=[40, 50], rot=0.1, err=0.1):
    """
    Generate test catalogs for matching
    """
    import numpy as np
    from skimage.transform import SimilarityTransform

    np.random.seed(seed)

    V1 = np.random.rand(N1, 2) * 1000 - 500

    # Transform and add noise to second catalog, drawn from the first
    # (np.cast was removed in NumPy 2.0; astype is the portable spelling)
    draw_ix = np.unique((np.random.rand(N2) * N1).astype(int))
    N2 = len(draw_ix)

    tf = SimilarityTransform(matrix=None,
                             scale=1.,
                             rotation=rot,
                             translation=tr)

    V2 = tf(V1[draw_ix, :]) + np.random.normal(size=(N2, 2)) * err

    return V1, V2, tf
Example #24
    def estimate_norm(lmk):
        assert lmk.shape == (5, 2)

        tform = SimilarityTransform()
        lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
        min_M = []
        min_index = []
        min_error = np.inf
        src = ARCFACE_SRC

        for i in np.arange(src.shape[0]):
            tform.estimate(lmk, src[i])
            M = tform.params[0:2, :]

            results = np.dot(M, lmk_tran.T)
            results = results.T
            error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))

            # Keep the reference template with the smallest alignment error
            if error < min_error:
                min_error = error
                min_M = M
                min_index = i

        return min_M, min_index

Example #25
 def align(self,
           image: np.ndarray) -> Tuple[List[Any], List[Any], List[Any]]:
     ret = self.model.detect_face(image, det_type=0)
     if ret is None:
         return [], [], []
     bounding_boxes, landmarks = ret
     if bounding_boxes.shape[0] == 0:
         return [], [], []
     reference_facial_points = np.array(
         [[30.29459953, 51.69630051], [65.53179932, 51.50139999],
          [48.02519989, 71.73660278], [33.54930115, 92.3655014],
          [62.72990036, 92.20410156]],
         dtype=np.float32)
     reference_facial_points[:, 0] += 8.
     transform = SimilarityTransform()
     faces = []
     for landmark in landmarks:
         tmp_landmark = np.array(landmark, dtype=np.float32).reshape(
             (2, 5)).T
         transform.estimate(tmp_landmark, reference_facial_points)
         M = transform.params[0:2, :]
         warped_face = cv2.warpAffine(image, M, (112, 112), borderValue=0.0)
         faces.append(warped_face)
     return bounding_boxes, landmarks, faces
Example #26
from skimage.transform import SimilarityTransform, warp
import matplotlib.pyplot as plt
import numpy as np

### start with map with largest offset
img2 = FS3.scan332
SHFT2 = SimilarityTransform(translation=(-44, -19))
# shift (translate) all imported channels
shifted_channels2 = []
for i, channel in enumerate(channels):
    IMG2_SHIFT = warp(img2[i,:,:-2], SHFT2)
    shifted_channels2.append(IMG2_SHIFT)
# store shifted channels
shifted_channels2 = np.array(shifted_channels2)

### make mask that defines area to crop
mask = IMG2_SHIFT != 0
mask = mask.astype(int)

### mask the map used as alignment reference
img0 = FS3.scan323
shifted_channels0 = []
Example #27
def test_inverse():
    tform = SimilarityTransform(scale=0.5, rotation=0.1)
    inverse_tform = SimilarityTransform(matrix=np.linalg.inv(tform.params))
    image = np.arange(10 * 10).reshape(10, 10).astype(np.double)
    assert_equal(warp(image, inverse_tform), warp(image, tform.inverse))
Example #28
def test_invalid():
    with testing.raises(ValueError):
        warp(np.ones((4, 3, 3, 3)), SimilarityTransform())
Example #29
def test_warp_coords_example():
    image = data.astronaut().astype(np.float32)
    assert 3 == image.shape[2]
    tform = SimilarityTransform(translation=(0, -10))
    coords = warp_coords(tform, (30, 30, 3))
    map_coordinates(image[:, :, 0], coords[:2])
Example #30
Utils.showImage(img)

# quadruple image canvas size and center digit
scale = 4
img2 = np.full((scale * img.shape[0], scale * img.shape[1], img.shape[2]), 0)
img2[:, :, 3] = 255
imh, iml, imd = img.shape
im2h, im2l, im2d = img2.shape
midl, midh = int(iml / 2), int(imh / 2)
mid2l, mid2h = int(im2l / 2), int(im2h / 2)
img2[mid2h - midh:mid2h + midh,
     mid2l - midl:mid2l + midl, :] = img[0:midh * 2, 0:midl * 2, :]
print("orig=" + str(np.mean(img2[:, :, 0:3])))
Utils.showImage(img2)

tiltTform1 = SimilarityTransform(translation=(0, mid2h - midh))
#tiltTform2 = AffineTransform(shear=np.deg2rad(tilt))
tiltTform2 = AffineTransform(shear=np.deg2rad(20))
tiltTform3 = SimilarityTransform(translation=(-tilt / 45 * mid2l,
                                              -mid2h + midh))
tiltTform = tiltTform1 + tiltTform2 + tiltTform3
tilted = warp(
    img2,
    tiltTform,
    output_shape=img2.shape,
    mode='edge',  # TODO: mode='constant' with cval=0 does not work!
    preserve_range=True)
print("tilted=" + str(np.mean(tilted[:, :, 0:3])))
#Utils.showImage(tilted)