Example #1
File: extract.py Project: mitbal/pemilu
def extract_digits(fname, mode):
    """ Extract the sub images of digits from the scanned image.
        fname is the filename of the image
    """
    c1 = io.imread(fname)

    # Calculate the region of interest; these values are based on experimental observation.
    # Basically it divides the scanned images into three size classes: small, medium, and big.
    # However, this does not cover the full variation in size of the scanned images.
    dim = c1.shape
    y0 = 350
    y1 = y0 + 450
    if dim[0] < 1100:
        y0 = dim[0] * 300 / 1700
        y1 = y0 + dim[0] * 400 / 1700
    elif 1700 < dim[0] < 2400:
        y0 = 350
        y1 = y0 + 450
    else:
        y0 = dim[0] * 350 / 1700
        y1 = y0 + dim[0] * 450 / 1700
    x0 = dim[1] * 19 / 24

    # Crop and convert to grayscale
    cropped = c1[y0:y1, x0:]
    gcrop = color.rgb2gray(cropped)

    # Threshold to create binary image
    thresh = threshold_otsu(gcrop)
    gcrope = gcrop < thresh

    # Remove unwanted edge
    gcrope = remove_edge(gcrope, mode='r')

    if DEBUG:
        plt.subplot(141)
        plt.title('Cropped')
        plt.imshow(gcrop, cmap='gray')

    # Extract the four corner points, using either the Hough or the Harris method
    dest_points = extract_corner_hough(gcrope)
    # dest_points = extract_corner_harris(gcrope)
    src_points = [(0, 0), (0, HEIGHT), (WIDTH, HEIGHT), (WIDTH, 0)]

    if not dest_points:
        return False

    if DEBUG:
        plt.subplot(143)
        plt.title('Thresholded & corner points')
        plt.imshow(gcrope, cmap='gray')
        for p in dest_points:
            plt.plot(p[0], p[1], 'bo', markersize=10)

    # Transform to rescale and reorient the image
    dst = np.array(dest_points)
    src = np.array(src_points)
    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)
    warped = warp(gcrope, tform, output_shape=(HEIGHT, WIDTH))

    if mode == 'test':
        # Save to file
        fname_out = fname.split('/')[-1][:-4] + '-ex' + '.png'
        io.imsave(fname_out, warped)
        return True

    if DEBUG:
        plt.subplot(144)
        plt.title('Warped')
        plt.imshow(warped, cmap='gray')
        plt.show()

    # Prepare the directory
    if not os.path.exists('extracted'):
        os.makedirs('extracted')
        for i in xrange(10):
            os.makedirs('extracted/' + str(i))

    # Load the annotation for each digit
    fname_txt = fname[:-3] + 'txt'
    f = open(fname_txt, 'r')
    f.readline()  # Remove the header
    lines = f.readline().split(',')
    f.close()

    width = WIDTH / 3
    # Extract each digit; first pass: the hundreds digit of each of the four numbers
    for i in xrange(4):
        if len(lines[i]) < 3:
            hundred = '0'
        else:
            hundred = lines[i][0]
        counter[int(hundred)] += 1
        patch = remove_edge(warped[i * 100:i * 100 + 100, :width], mode='full')
        io.imsave('extracted/' + hundred + '/' + str(counter[int(hundred)]) + '.png', patch)

    # Second pass: the tens digit of each number
    for i in xrange(4):
        if len(lines[i]) == 3:
            hundred = lines[i][1]
        elif len(lines[i]) == 2:
            hundred = lines[i][0]
        else:
            hundred = '0'
        counter[int(hundred)] += 1
        patch = remove_edge(warped[i * 100:i * 100 + 100, width:2 * width], mode='full')
        io.imsave('extracted/' + hundred + '/' + str(counter[int(hundred)]) + '.png', patch)

    # Third pass: the units digit of each number
    for i in xrange(4):
        if len(lines[i]) == 1:
            hundred = lines[i][0]
        elif len(lines[i]) == 2:
            hundred = lines[i][1]
        elif len(lines[i]) == 3:
            hundred = lines[i][2]
        else:
            hundred = '0'
        counter[int(hundred)] += 1
        patch = remove_edge(warped[i * 100:i * 100 + 100, 2 * width:3 * width], mode='full')
        io.imsave('extracted/' + hundred + '/' + str(counter[int(hundred)]) + '.png', patch)

    return True
Example #2
 def PiecewiseAffine(img, mask, points=8):
     ### piecewise affine ###
     rows, cols = img.shape[0], img.shape[1]
     src_cols = np.linspace(0, cols, points)
     src_rows = np.linspace(0, rows, points)
     src_rows, src_cols = np.meshgrid(src_rows, src_cols)
     src = np.dstack([src_cols.flat, src_rows.flat])[0]
     # add offset
     dst_rows = np.zeros(src[:, 1].shape) + src[:, 1]
     for i in list(range(points))[1:-1]:
         dst_rows[i::points] += np.random.normal(
             loc=0,
             scale=rows / (points * 10),
             size=dst_rows[i::points].shape)
     dst_cols = np.zeros(src[:, 0].shape) + src[:, 0]
     dst_cols[points:-points] += np.random.normal(
         loc=0,
         scale=rows / (points * 10),
         size=dst_cols[points:-points].shape)
     dst = np.vstack([dst_cols, dst_rows]).T
     # compute transform
     tform = PiecewiseAffineTransform()
     tform.estimate(src, dst)
     # apply transform
     img = warp(img, tform, output_shape=(rows, cols))
     mask = warp(mask, tform, output_shape=(rows, cols))
     return img, mask
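A minimal usage sketch for the augmentation above, assuming the PiecewiseAffine function and its numpy/skimage imports are in scope (the image and mask here are dummies, not data from the source project):

import numpy as np

rng = np.random.default_rng(0)
img = rng.random((128, 128))             # dummy grayscale image in [0, 1]
mask = (img > 0.5).astype(np.float64)    # dummy binary mask

aug_img, aug_mask = PiecewiseAffine(img, mask, points=8)
print(aug_img.shape, aug_mask.shape)     # both stay (128, 128)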
Example #3
def getMaskContour(mask_dir, atlas_img, predicted_pts, actual_pts, cwd, n, main_mask):
    """
    Gets the contour of the brain's boundaries and applies a piecewise affine transform to the brain atlas
    based on the cortical landmarks predicted in dlc_predict (and peaks of activity on the sensory map, if available).
    :param mask_dir: The path to the directory containing the U-net masks of the brain's boundaries.
    :param atlas_img: The brain atlas to be transformed.
    :param predicted_pts: The coordinates of the cortical landmarks predicted in dlc_predict (or, for the second run
    of this function, the coordinates of the peaks of activity in the sensory map).
    :param actual_pts: The fixed coordinates of the cortical landmarks on the brain atlas (or, for the second run of
    this function, the fixed coordinates of the peaks of sensory activity on the brain atlas).
    :param cwd: The path to the current working directory.
    :param n: The number of the current image in the directory.
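    :param main_mask: If True, the loaded brain-boundary mask is also saved to cwd as mask_{n}.png.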
    """
    c_landmarks = np.empty([0, 2])
    c_atlas_landmarks = np.empty([0, 2])
    mask = cv2.imread(mask_dir, cv2.IMREAD_GRAYSCALE)
    atlas_to_warp = atlas_img
    mask = np.uint8(mask)
    cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
    for cnt in cnts:
        cnt = cnt[:, 0, :]
        cnt = np.asarray(cnt).astype("float32")
        c_landmarks = np.concatenate((c_landmarks, cnt))
        c_atlas_landmarks = np.concatenate((c_atlas_landmarks, cnt))
    c_landmarks = np.concatenate((c_landmarks, predicted_pts))
    c_atlas_landmarks = np.concatenate((c_atlas_landmarks, actual_pts))
    tform = PiecewiseAffineTransform()
    tform.estimate(c_atlas_landmarks, c_landmarks)
    dst = warp(atlas_to_warp, tform, output_shape=(512, 512))
    if main_mask:
        io.imsave(os.path.join(cwd, "mask_{}.png".format(n)), mask)
    return dst
Example #4
def get_mesh_warped_img(img, pts, pose_idx):
    """Piecewise affine warp to template mesh"""
    src_points = mean_shapes_mesh_ex[
        pose_idx]  # mesh template size: 0 ~ mesh_h, 0 ~ mesh_w
    dst_points = np.zeros((n_points_ex, 2),
                          np.float32)  # current points with outside box
    dst_points[0:n_points, :] = pts  # current points
    warp_mat_t_template2pts = get_warp_mat_t_template2pts(
        pts, pose_idx)  # template -> pts warp matrix
    outside_pts = get_warped_pts(
        mean_shape_ex_outside, warp_mat_t_template2pts)  # warp outside points
    dst_points[n_points:, :] = outside_pts  # current points with outside box

    tform = PiecewiseAffineTransform()  # piecewise affine transform
    tform.estimate(src_points, dst_points)
    img_mesh_warped = warp(img, tform, output_shape=(mesh_h, mesh_w))

    # # draw
    # plt.figure(2)
    # plt.gcf().clear()
    # plt.subplot(1, 2, 1)
    # plt.imshow(img)
    # plt.scatter(src_points[:, 0], src_points[:, 1], c='r')
    # plt.subplot(1, 2, 2)
    # plt.imshow(img)
    # plt.scatter(dst_points[:, 0], dst_points[:, 1], c='r')
    # plt.draw()
    # plt.pause(0.001)
    # k = 1
    # k = k + 1

    return img_mesh_warped
Example #5
def warp_piecewise_affine(image, map_args={}, output_shape=None, order=1, mode='constant', cval=0.0, clip=True, preserve_range=False):
    #image = imageGlobal
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.5 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 50
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))

    fig, ax = plt.subplots()
    ax.imshow(out)
    ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
    ax.axis((0, out_cols, out_rows, 0))
    plt.show()
Example #6
    def __call__(self, sample):
        image = sample
        if np.random.rand() > self.prob:
            return image

        image = np.array(image)
        rows, cols = image.shape[0], image.shape[1]
        src_cols = np.linspace(0, cols, 4)
        src_rows = np.linspace(0, rows, 2)
        src_rows, src_cols = np.meshgrid(src_rows, src_cols)
        src = np.dstack([src_cols.flat, src_rows.flat])[0]

        dfactor = np.random.randint(10, 20)
        pfactor = (np.random.randint(0, 3), np.random.randint(2, 4))
        dst_rows = src[:, 1] - np.sin(
            np.linspace(pfactor[0] * np.pi / 2, pfactor[1] * np.pi,
                        src.shape[0])) * dfactor
        dst_cols = src[:, 0]
        dst = np.vstack([dst_cols, dst_rows]).T

        tform = PiecewiseAffineTransform()
        tform.estimate(src, dst)
        out_image = warp(image,
                         tform,
                         output_shape=(rows, cols),
                         cval=255,
                         preserve_range=True)
        out_image = Image.fromarray(np.uint8(out_image))
        return out_image
Example #7
def affin_transform(img_frame, nrrd_frame):
    '''
    transform the shape of nrrd_frame to fit the tissue image frame
    :param img_frame:
    :param nrrd_frame:
    :return:
    '''
    hull = get_convex_hull(img_frame, False, False)
    point_img_list = calculate_intersection_point(hull, img_frame, False)
    x = int(img_frame.shape[1] * 0.5)
    y = int(img_frame.shape[0] * 0.5)
    point_img_list.append((x, y))
    point_img_list = np.float32(point_img_list)

    nrrd_frame_reference, nrrd_frame_clean = pre_calibrate_single_frame(
        img_frame, nrrd_frame, False)

    hull_nrrd = get_convex_hull(nrrd_frame_reference, False, False, 1)

    point_nrrd_list = calculate_intersection_point(hull_nrrd,
                                                   nrrd_frame_reference, False)
    x = int(img_frame.shape[1] * 0.5)
    y = int(img_frame.shape[0] * 0.5)
    point_nrrd_list.append((x, y))
    point_nrrd_list = np.float32(point_nrrd_list)

    tform = PiecewiseAffineTransform()
    tform.estimate(point_img_list, point_nrrd_list)
    out = warp(nrrd_frame_clean,
               tform,
               output_shape=nrrd_frame_reference.shape)

    return out
Example #8
def piecewise_affine_transform(image, srcAnchor, tgtAnchor):
    '''  Return 0-1 range
    '''
    trans = PiecewiseAffineTransform()
    trans.estimate(srcAnchor, tgtAnchor)
    warped = warp(image, trans)
    return warped
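A hypothetical call, pinning the four image corners and nudging only one interior anchor so that just its neighbourhood deforms (the anchor values below are illustrative, not from the source project):

import numpy as np
from skimage import data

img = data.camera()
h, w = img.shape
srcAnchor = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1],
                      [w // 2, h // 2]], dtype=float)
tgtAnchor = srcAnchor.copy()
tgtAnchor[-1] += [15, -10]               # displace only the centre anchor

warped = piecewise_affine_transform(img, srcAnchor, tgtAnchor)  # floats in [0, 1]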
Example #9
def whole_rdistort(im, severity=1, scop=40):
    """
    Using the affine projection method in skimg,
    Realize the picture through the corresponding coordinate projection
    Specifies the distortion effect of the form. This function will normalize 0-1
    """

    if severity == 0:
        return im

    theta = severity * scop
    rows, cols = im.shape[:2]
    colpoints = max(int(cols * severity * 0.05), 3)
    rowpoints = max(int(rows * severity * 0.05), 3)

    src_cols = np.linspace(0, cols, colpoints)
    src_rows = np.linspace(0, rows, rowpoints)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # The key location for wave distortion effect
    dst_rows = src[:, 1] - period_map(np.linspace(0, 100, src.shape[0]), 50,
                                      20)

    # dst columns
    dst_cols = src[:,
                   0] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * theta

    dst = np.vstack([dst_cols, dst_rows]).T
    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)
    image = warp(im, tform, mode='edge', output_shape=(rows, cols)) * 255
    return np.array(cvt_uint8(image))
Example #10
def convert_image(imageLoc, imageOutLoc, var):
    image = misc.imread(imageLoc)
    rows, cols = image.shape[0], image.shape[1]
    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    #dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
    #dst_cols = src[:, 0]
    #dst_rows *= 1.5
    #dst_rows -= 1.5 * 50
    #dst = np.vstack([dst_cols, dst_rows]).T

    mu, sigma = 0, var  # mean and standard deviation
    s = np.random.normal(mu, sigma, 1000)  #src.shape[0]

    # add Gaussian oscillation to row coordinates
    dst_rows = src[:, 1] - np.random.normal(mu, sigma, src.shape[0]) * 50
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.5 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 50
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    misc.imsave(imageOutLoc, out)
Example #11
File: pwa.py Project: hyb1234hi/emci
def pwa(image, src, dst, shape):
    """
    :param image: aligned image or cropped image
    :param src: normalized src landmarks
    :param dst: normalized dst landmarks
    :param shape: output shape, [rows, cols]
    :return: piecewise-affine warped image
    """
    image = cv2.resize(image, shape)
    src[:, 0] *= shape[0]
    src[:, 1] *= shape[1]
    dst[:, 0] *= shape[0]
    dst[:, 1] *= shape[1]
    N = 10
    z = np.zeros((N, 1))
    l = np.reshape(np.linspace(0, shape[1], N), (N, 1))
    top = np.concatenate([l, z], axis=1)
    bottom = np.concatenate([l, np.ones((N, 1)) * shape[0]], axis=1)

    l = np.reshape(np.linspace(0, shape[0], N), (N, 1))
    left = np.concatenate([z, l], axis=1)
    right = np.concatenate([np.ones((N, 1)) * shape[1], l], axis=1)

    add = np.concatenate([top, bottom, left, right], axis=0)
    src = np.concatenate([src, add], axis=0)
    dst = np.concatenate([dst, add], axis=0)
    tform = PiecewiseAffineTransform()
    tform.estimate(dst, src)
    # out_rows ,out_cols = shape
    out_rows = image.shape[0]
    out_cols = image.shape[1]
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    return out
Example #12
def bosse(path='img.png'):
    im = Image.open(path)
    image = np.array(im)
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    dst_rows = src[:, 1] - np.sin(np.linspace(np.pi, 2 * np.pi,
                                              src.shape[0])) * 50
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.5 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 50
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))

    im = Image.fromarray((out * 255).astype(np.uint8))
    im.save("img_modif.png")
Example #13
def piecewise_affine_transform():
	image = data.astronaut()
	rows, cols = image.shape[0], image.shape[1]

	src_cols = np.linspace(0, cols, 20)
	src_rows = np.linspace(0, rows, 10)
	src_rows, src_cols = np.meshgrid(src_rows, src_cols)
	src = np.dstack([src_cols.flat, src_rows.flat])[0]

	# Add sinusoidal oscillation to row coordinates.
	dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
	dst_cols = src[:, 0]
	dst_rows *= 1.5
	dst_rows -= 1.5 * 50
	dst = np.vstack([dst_cols, dst_rows]).T

	tform = PiecewiseAffineTransform()
	tform.estimate(src, dst)

	out_rows = image.shape[0] - 1.5 * 50
	out_cols = cols
	out = warp(image, tform, output_shape=(out_rows, out_cols))

	fig, ax = plt.subplots()
	ax.imshow(out)
	ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
	ax.axis((0, out_cols, out_rows, 0))
	plt.show()
Example #14
def main():
    args = cli()

    reference = io.imread(args.reference)
    sensed = io.imread(args.sensed)

    rows, cols = reference.shape
    ref_cols = np.linspace(0, cols, 20)
    ref_rows = np.linspace(0, rows, 10)
    ref_cols, ref_rows = np.meshgrid(ref_cols, ref_rows)
    src = np.dstack([ref_cols.flat, ref_rows.flat])[0]

    rows, cols = sensed.shape
    ref_cols = np.linspace(0, cols, 20)
    ref_rows = np.linspace(0, rows, 10)
    ref_cols, ref_rows = np.meshgrid(ref_cols, ref_rows)
    dst = np.dstack([ref_cols.flat, ref_rows.flat])[0]

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out = warp(sensed, tform)
    plt.subplot(211)
    plt.imshow(reference)

    plt.subplot(212)
    plt.imshow(out)

    plt.show()
Example #15
def affine_transform(img):
    rows, cols = img.shape[0], img.shape[1]

    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 20)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    dst_rows = src[:, 1]  # - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
    print(src[:, 1])
    print(src[:, 0])
    dst_cols = src[:, 0] - np.sin((src[:, 0] / np.max(src[:, 0])) * np.pi) * np.max(src[:, 0])
    print(dst_cols)

    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = rows
    out_cols = cols
    out = warp(img, tform, output_shape=(out_rows, out_cols))

    fig, ax = plt.subplots()
    ax.imshow(out)
    ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
    ax.axis((0, out_cols, out_rows, 0))
    plt.savefig('plots/piecewise_affine.png')
    plt.show()
Example #16
def transform_image(stationary_image, warp_image, stationary_points,
                    warp_points):
    tform = PiecewiseAffineTransform()
    tform.estimate(stationary_points, warp_points)
    out_rows = stationary_image.shape[0]
    out_cols = stationary_image.shape[1]
    warped_image = warp(warp_image, tform, output_shape=(out_rows, out_cols))
    return warped_image
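An illustrative way to drive this helper with matched control points (the grids below are synthetic; a real caller would pass corresponding landmarks detected in the two images):

import numpy as np
from skimage import data

fixed = data.camera()
moving = data.camera()

ys, xs = np.mgrid[0:fixed.shape[0] + 1:64, 0:fixed.shape[1] + 1:64]
stationary_points = np.column_stack([xs.ravel(), ys.ravel()]).astype(float)
warp_points = stationary_points + np.random.normal(0, 3, stationary_points.shape)

aligned = transform_image(fixed, moving, stationary_points, warp_points)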
Example #17
def get_transform(image, src_points, dst_points):
    src_points = np.array([[0, 0], [0, image.shape[0]], [image.shape[0], 0],
                           list(image.shape[:2])] + src_points.tolist())
    dst_points = np.array([[0, 0], [0, image.shape[0]], [image.shape[0], 0],
                           list(image.shape[:2])] + dst_points.tolist())
    tform3 = PiecewiseAffineTransform()
    tform3.estimate(dst_points, src_points)
    return tform3
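One possible way to apply the returned transform (the landmark coordinates below are made up; warp and data come from scikit-image):

import numpy as np
from skimage import data
from skimage.transform import warp

img = data.camera()
src_points = np.array([[100.0, 100.0], [300.0, 200.0]])   # hypothetical landmarks
dst_points = src_points + 10.0
tform3 = get_transform(img, src_points, dst_points)
out = warp(img, tform3, output_shape=img.shape)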
Example #18
    def piecewise_shift(item):
        (k, v) = item

        frame_shift = asarray([x[k] for x in shifts])
        dst = asarray([s + x for s, x in zip(src, frame_shift)])

        tform = PiecewiseAffineTransform()
        tform.estimate(src, dst)
        return warp(v, tform)
Example #19
def randdistort(img):
    image = np.array(plt.imread(img))
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]
    funclist = [
        lambda x: np.sin(x), lambda x: np.cos(x), lambda x: np.arctan(x)
    ]
    numberOfFunctions = random.randint(2, 5)

    # add random trigonometric oscillations to the row and column coordinates
    def func(x):
        newfuncs = []
        for i in range(numberOfFunctions):
            rand1 = random.randint(1, 5)
            rand2 = random.randint(0, 30)
            rand3 = random.randint(0, 10)
            newfuncs.append(lambda x: rand3 * random.choice(funclist)
                            (1 / rand1 * x + rand2))
        for function in newfuncs:
            x += function(x)
        return x

    def func2(x):
        newfuncs = []
        for i in range(numberOfFunctions):
            rand1 = random.randint(1, 5)
            rand2 = random.randint(0, 30)
            rand3 = random.randint(0, 15)
            newfuncs.append(lambda x: rand3 * random.choice(funclist)
                            (1 / rand1 * x + rand2))
        for function in newfuncs:
            x += function(x)
        return x

    dst_rows = src[:, 1] + func(np.linspace(0, 10 * np.pi, src.shape[0]))
    dst_cols = src[:, 0] + func2(np.linspace(0, 3 * np.pi, src.shape[0]))
    dst_rows *= 1.5
    dst_rows -= 1.5 * 50
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 50
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))

    fig, ax = plt.subplots()
    ax.imshow(out)
    ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
    plt.imsave('name.png', out)
    ax.axis((0, out_cols, out_rows, 0))
    return 'name.png'
Example #20
 def piecewise_affine_transform(image, source_lmks, target_lmks):
     anchor = list(range(31)) + [36, 39, 42, 45, 48, 51, 54, 57]
     tgt_lmks = target_lmks[anchor, :]
     dst_lmks = source_lmks[anchor, :]
     tform = PiecewiseAffineTransform()
     tform.estimate(tgt_lmks, dst_lmks)
     dst = warp(image, tform,
                output_shape=image.shape[:2]).astype(np.float32)
     return dst
Example #21
def arap_transform(cloth, gOriginalMesh, gDeformedMesh):

    pwtform = PiecewiseAffineTransform()
    pwtform.estimate(gDeformedMesh.vertices, gOriginalMesh.vertices)
    warpedUpperClothfloat = warp(cloth, pwtform, output_shape=cloth.shape)

    # 4.4 convert type from float64 to uint8
    warpedUpperClothfloat = 255 * warpedUpperClothfloat  # Now scale by 255
    warpedUpperCloth = warpedUpperClothfloat.astype(np.uint8)

    return warpedUpperCloth
Example #22
def piecewise_affine(image):
    rows, cols = image.shape[0], image.shape[1]
    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 20)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]
    dst_rows = src[:, 1] - np.cos(np.linspace(0, 3 * np.pi, src.shape[0])) * 20
    dst_cols = src[:, 0]
    dst = np.vstack([dst_cols, dst_rows]).T
    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)
    out = warp(image, tform)
    return out
Example #23
def main():
    inv_param = np.linalg.inv(
        np.array([(i, i**2, i**3, i**4, 1) for i in range(0, 17, 4)]))
    param = inv_param.dot(np.array((0, 0.04, 0.01, 0.04, 0)))
    beautify_param = np.array([(i, i**2, i**3, i**4, 1)
                               for i in range(0, 17)]).dot(param)
    beautify_param = np.tile(beautify_param, (2, 1)).transpose()
    cv2.namedWindow("frame")
    cap = cv2.VideoCapture(0)
    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        input_img = frame.copy() / 255.0
        height, width, depth = frame.shape
        src = np.array(
            ((0, 0), (width / 2, 0), (width - 1, 0), (0, height / 2),
             (0, height - 1), (width - 1, height - 1)))
        dst = np.array(
            ((0, 0), (width / 2, 0), (width - 1, 0), (0, height / 2),
             (0, height - 1), (width - 1, height - 1)))
        face_landmarks_list = face_recognition.face_landmarks(frame)
        transform = PiecewiseAffineTransform()
        for face_landmarks in face_landmarks_list:
            chin = face_landmarks['chin']
            nose_bridge = face_landmarks['nose_bridge']

            # draw_multiline_line(frame, chin, (0, 255, 0), 2)
            # draw_multiline_line(input_img, chin, (0, 1.0, 0), 2)
            src = np.vstack([src, np.array(chin)])
            src = np.vstack([src, np.array(nose_bridge)])

            nose_point = face_landmarks['nose_bridge'][-1]
            dst = np.vstack([
                dst, (np.array(chin) - np.tile(np.array(nose_point),
                                               (17, 1))) * beautify_param +
                np.array(chin)
            ])
            dst = np.vstack([dst, np.array(nose_bridge)])

            # draw_multiline_line(frame,nose_bridge,(255,0,0),5)
            # nose_tip = face_landmarks['nose_tip']
            # draw_multiline_line(frame,nose_tip,(0,0,255),5)
        transform.estimate(src, dst)
        out_img = warp(frame, transform)
        result = np.hstack([input_img, out_img])
        cv2.imshow('frame', result)
        cv2.imwrite('beauty.png', result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Example #24
def warpFace(im, oldLandmarks, newLandmarks, justFace=False, output_shape=None):
    print("warping face")
    if not justFace:
        cornerPts = np.array([(0, 0), (im.shape[1], 0), (im.shape[1], im.shape[0]), (0, im.shape[0])])

        oldLandmarks = np.append(oldLandmarks, cornerPts, axis=0)
        newLandmarks = np.append(newLandmarks, cornerPts, axis=0)

    tform = PiecewiseAffineTransform()
    tform.estimate(newLandmarks, oldLandmarks)

    warped = warp(im, tform, output_shape=output_shape)
    warped = skimage.img_as_ubyte(warped)
    return warped
Example #25
def warp(im, points, disp_min, disp_max, disp_len=None, disp_angle=None):
    h, w = im.shape[:2]

    # Include the corners
    src_pts = np.array([[0, 0], [w, 0], [0, h], [w, h]])
    dst_pts = np.array([[0, 0], [w, 0], [0, h], [w, h]])

    for i in range(points):
        rand_x = random.uniform(0, w)
        rand_y = random.uniform(0, h)
        p = np.array([rand_x, rand_y])
        if disp_len is None:
            rand_len = random.uniform(disp_min, disp_max)
        else:
            rand_len = disp_len

        if disp_angle is None:
            rand_angle = np.deg2rad(random.uniform(0, 360))
        else:
            rand_angle = disp_angle

        p2_x = rand_x + np.cos(rand_angle) * rand_len
        p2_y = rand_y + np.sin(rand_angle) * rand_len
        p2 = np.array([p2_x, p2_y])

        if src_pts is None:
            src_pts = np.array([p])
        else:
            temp = np.vstack((src_pts, p))
            src_pts = temp

        if dst_pts is None:
            dst_pts = np.array([p2])
        else:
            temp = np.vstack((dst_pts, p2))
            dst_pts = temp

    from skimage.transform import warp, PiecewiseAffineTransform
    tform = PiecewiseAffineTransform()
    tform.estimate(src_pts, dst_pts)
    warped_im = warp(im, tform)

    # Convert to OpenCV
    from skimage import img_as_ubyte
    warped_im = img_as_ubyte(warped_im)
    return warped_im
Example #26
def non_linear_warp_transform(img, annotation):
    rows, cols = img.shape[0], img.shape[1]

    src_cols = np.linspace(0, cols, 6)
    src_rows = np.linspace(0, rows, 6)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    dst = np.random.normal(0.0, 10, size=(36, 2)) + src

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = img.shape[0]
    out_cols = img.shape[1]
    img_out = warp(img, tform, output_shape=(out_rows, out_cols))
    annotation_out = warp(annotation, tform, output_shape=(out_rows, out_cols))
    return img_out, annotation_out
Example #27
def warp_it(image):
    import numpy as np

    from skimage.transform import PiecewiseAffineTransform, warp
    from scipy import misc
    from PIL import Image
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # print(str(src))
    # print("00000000000000000000000000000000000000000000000000")
    # print(str(src[:, 1]))
    # print("00000000000000000000000000000000000000000000000000")

    # print(str(src[:, 0]))

    # # add sinusoidal oscillation to coordinates
    # dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 16
    # dst_cols = src[:, 0] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 16
    # dst_rows += 8
    # dst = np.vstack([dst_cols, dst_rows]).T
    from random import randint
    dst = src.copy()
    for i in range(dst.shape[0]):
        x = dst[i][0]
        y = dst[i][1]

        dst[i][0] += randint(0, 15)
        dst[i][1] += randint(0, 15)

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] - 1.5 * 16
    out_cols = cols
    out = warp(image, tform, output_shape=(rows, cols))
    from skimage import img_as_ubyte

    return out
Example #28
def adapt_contour(img_in, mask_in, mask_out=None):

    contours_in = measure.find_contours(mask_in, 0)
    landmarks_in = equidistant_landmarks(contours_in[0], 24)

    contours_out = measure.find_contours(mask_out, 0)
    landmarks_out = equidistant_landmarks(contours_out[0], 24)

    if len(landmarks_in) > len(landmarks_out):
        landmarks_in = landmarks_in[:len(landmarks_out)]
    else:
        landmarks_out = landmarks_out[:len(landmarks_in)]

    tform = PiecewiseAffineTransform()
    tform.estimate(np.fliplr(np.array(landmarks_out)),
                   np.fliplr(np.array(landmarks_in)))
    img_out = warp(img_in, tform, output_shape=img_in.shape)

    return img_out, landmarks_in, landmarks_out
Example #29
def image_perspective_transform(im, angle=np.pi / 4, d=0.):

    nbre_samples = 10

    rows = im.shape[0]
    cols = im.shape[1]

    if d == 0.:
        d = im.shape[1] * 10

    h = im.shape[0]
    l = im.shape[1]
    h1 = np.cos(angle) * h
    delta_d = np.sin(angle) * h
    h2 = d * h1 / (d + delta_d)
    l2 = d * l / (d + delta_d)

    #    l2 = h2

    src_row = np.linspace(0, h, nbre_samples)
    src_col = np.linspace(0, l, nbre_samples)

    src_row, src_col = np.meshgrid(src_row, src_col)
    src = np.dstack([src_col.flat, src_row.flat])[0]

    dst_row = h - np.linspace(h2, 0, nbre_samples)
    dst_col = np.linspace(0, l, nbre_samples)

    dst_row, dst_col = np.meshgrid(dst_row, dst_col)

    scale = np.linspace(l2 / l, 1, nbre_samples)
    shift = np.linspace(l - l2, 0, nbre_samples) / 2.

    dst_col = dst_col * scale[np.newaxis, :] + shift[np.newaxis, :]

    dst = np.dstack([dst_col.flat, dst_row.flat])[0]

    transform = PiecewiseAffineTransform()
    transform.estimate(dst, src)

    return warp(im, transform, output_shape=im.shape)
Example #30
def deforms(image):
    ratio = 5
    rows, cols = image.shape[0], image.shape[1]
    src_cols = np.linspace(0, cols, 20)
    src_rows = np.linspace(0, rows, 10)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    dst_rows = src[:,
                   1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * ratio
    dst_cols = src[:, 0]
    dst_rows *= 1.5
    dst_rows -= 1.5 * ratio
    dst = np.vstack([dst_cols, dst_rows]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    image = warp(image, tform, output_shape=(rows, cols))
    return image
Example #31
def distort_data(shared_x, shared_y, dist_deg, save_pickle=False):
	x_orig = shared_x.get_value()
	#y_orig = shared_y.get_value()
	y_orig = shared_y
	x_distorted = []
	dim = x_orig.shape[1]
	image_dim = int(math.sqrt(dim))
	for x in x_orig:
		image = x.reshape((image_dim, image_dim))	
		rows, cols = image.shape[0], image.shape[1]

		src_cols = numpy.linspace(0, cols, 20)
		src_rows = numpy.linspace(0, rows, 10)
		src_rows, src_cols = numpy.meshgrid(src_rows, src_cols)
		src = numpy.dstack([src_cols.flat, src_rows.flat])[0]

		# add sinusoidal oscillation to row coordinates
		dst_rows = src[:, 1] - numpy.sin(numpy.linspace(0, 3 * numpy.pi, src.shape[0])) * dist_deg
		dst_cols = src[:, 0]
		dst_rows *= 1.5
		dst_rows -= 1.5 * dist_deg
		dst = numpy.vstack([dst_cols, dst_rows]).T


		tform = PiecewiseAffineTransform()
		tform.estimate(src, dst)

		out_rows = image.shape[0]
		out_cols = cols
		out = warp(image, tform, output_shape=(out_rows, out_cols))
		x_distorted.append(out.reshape(dim))
	new_x_np = numpy.concatenate([x_orig, x_distorted], axis=0)
	new_y_np = numpy.concatenate([y_orig, y_orig], axis=0)
	new_x = theano.shared(value = new_x_np, borrow=True)
	new_y = theano.shared(value = new_y_np, borrow=True)
	if save_pickle:
		out_file = gzip.open("train_data_with_distortion.pkl.gz", 'wb')
		pickle.dump((new_x_np, new_y_np), out_file)
	return new_x, new_y
Example #32
def generate_sine_frame(image_init,
                        label_init,
                        height_limit=(0, 20),
                        scale_limit=(1.1, 1.3),
                        shift_limit=(25, 35)):
    """Piecewise Affine to simulate sinusoidal corneal limbus.

    Argument:
        image_init: 1st frame WITHOUT watermark
        label_init: 1st corresponding label
        height_limit: peak of sine
        scale_limit: < 1: enlarge; > 1: shrink
        shift_limit: horizontal shift, make the shrunken curve lower
    Returns:
        image: warped image (float, range(0, 255))
        label: warped label (float, set{0, 1})
    """
    height, width = image_init.shape
    src_x, src_y = np.linspace(0, width, 20), np.linspace(0, height, 10)
    src_x, src_y = np.meshgrid(src_x, src_y, indexing='ij')
    # Cartesian indexing, src.shape: (num of points, 2)
    src = np.vstack([src_x.flat, src_y.flat]).T
    # Add sinusoidal oscillation to y coordinates
    half_period_num = 3  # overall sinuous shape
    max_height = np.random.uniform(*height_limit)  # the max value of sine

    dst_x = src[:, 0]
    dst_y = src[:, 1] + np.sin(
        np.linspace(0, half_period_num * np.pi, src.shape[0])) * max_height

    scale = np.random.uniform(*scale_limit)
    shift = np.random.uniform(*shift_limit)
    dst_y = scale * dst_y - shift
    dst = np.vstack([dst_x, dst_y]).T

    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    return warp_pairs(image_init, label_init, tform)
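warp_pairs is not shown in this snippet; a minimal sketch of what it presumably does (an assumption, not the original project's code) is to apply the same transform to the image and its label, using nearest-neighbour interpolation for the label so its {0, 1} values are preserved:

def warp_pairs(image, label, tform):
    # Warp the image, keeping its 0-255 float range
    image_w = warp(image, tform, output_shape=image.shape, preserve_range=True)
    # Warp the label with order=0 (nearest neighbour) to keep it binary
    label_w = warp(label, tform, output_shape=label.shape, order=0,
                   preserve_range=True)
    return image_w, label_w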
Example #33
def sinus(image, strength):
    rows, cols = image.shape[0], image.shape[1]

    src_cols = np.linspace(0, cols, 5)
    src_rows = np.linspace(0, rows, 2)
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]

    # add sinusoidal oscillation to row coordinates
    dst_rows = src[:, 1] - np.sin(np.linspace(0, 2*np.pi, src.shape[0])) * strength 
    dst_cols = src[:, 0]
    dst_rows *= 1.
    dst_rows -= 1.5 * strength
    dst = np.vstack([dst_cols, dst_rows]).T


    tform = PiecewiseAffineTransform()
    tform.estimate(src, dst)

    out_rows = image.shape[0] #- 1.5 * 5
    out_cols = cols
    out = warp(image, tform, output_shape=(out_rows, out_cols))
    return np.array(out, dtype='float32')
Example #34
from skimage import data


image = data.astronaut()  # data.lena() was removed from scikit-image; astronaut() is the stock replacement
rows, cols = image.shape[0], image.shape[1]

src_cols = np.linspace(0, cols, 20)
src_rows = np.linspace(0, rows, 10)
src_rows, src_cols = np.meshgrid(src_rows, src_cols)
src = np.dstack([src_cols.flat, src_rows.flat])[0]

# add sinusoidal oscillation to row coordinates
dst_rows = src[:, 1] - np.sin(np.linspace(0, 3 * np.pi, src.shape[0])) * 50
dst_cols = src[:, 0]
dst_rows *= 1.5
dst_rows -= 1.5 * 50
dst = np.vstack([dst_cols, dst_rows]).T


tform = PiecewiseAffineTransform()
tform.estimate(src, dst)

out_rows = image.shape[0] - 1.5 * 50
out_cols = cols
out = warp(image, tform, output_shape=(out_rows, out_cols))

plt.imshow(out)
plt.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.b')
plt.axis((0, out_cols, out_rows, 0))
plt.show()
Example #35
	data_pca[k] = (v+meanshape)-mean

meanshape = ((meanshape-mean)+[cropsize[0]/2,cropsize[1]/2])

imshape = (cropsize[0], cropsize[1], 3)
avim = np.zeros(imshape)
imlen = len(data_pca.keys())

# for each image
count = 0
for filename, values in data_pca.iteritems():
  # warp to meanshape
  im = imread( os.path.join(data_folder, "cropped/", filename) )
  
  tform = PiecewiseAffineTransform()
  tform.estimate(meanshape, values+[cropsize[0]/2,cropsize[1]/2])
  # store in array
  outim = warp(im, tform, output_shape=cropsize)
  #imsave("./averageface/test.bmp", outim)
  avim += skimage.util.img_as_float(outim)
  count += 1
  print str(count)

avim /= imlen
avim *= 255  
avim = avim.astype(np.uint8)
imsave("./average.bmp", Image(avim))

if cleanUp:
  import shutil
  if os.path.exists(os.path.join(config.data_folder, "cropped")):
Example #36
def test_piecewise_affine():
    tform = PiecewiseAffineTransform()
    tform.estimate(SRC, DST)
    # make sure each single affine transform is exactly estimated
    assert_array_almost_equal(tform(SRC), DST)
    assert_array_almost_equal(tform.inverse(DST), SRC)
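SRC and DST are module-level fixtures in scikit-image's test suite; to run the test in isolation, a small stand-in pair (values chosen purely for illustration, assuming PiecewiseAffineTransform is imported as in the examples above) is enough:

import numpy as np
from numpy.testing import assert_array_almost_equal

SRC = np.array([[0, 0], [0, 50], [50, 50], [50, 0], [25, 25]], dtype=float)
DST = SRC + np.array([[1, 2], [-2, 1], [1, -1], [2, 2], [0, 3]], dtype=float)

test_piecewise_affine()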
Example #37
"""
Use matplotlib's ginput to find point coordinates.
The process of aligning and combining images is known as "image registration".
"""
import matplotlib.pyplot as plt
import numpy as np
from pylab import ginput
from skimage.transform import PiecewiseAffineTransform


img0 = np.flipud(plt.imread('/Users/matar/Documents/Courses/python-seminar/Lectures/05_scikit-image/scikit-image/breakout/register/webreg_0.jpg'))


img1 = np.flipud(plt.imread('/Users/matar/Documents/Courses/python-seminar/Lectures/05_scikit-image/scikit-image/breakout/register/webreg_1.jpg'))


fig = plt.figure()
ax = fig.add_subplot(1,2,1)
ax.imshow(img0, interpolation = 'nearest')
ax.set_title('image 0')
ax2 = fig.add_subplot(1,2,2)
ax2.imshow(img1, interpolation = 'nearest')
ax2.set_title('image 1')

print "click 3 times on image 0\n"
pts0 = ginput(3) #points are returned as tuples
print "click 3 times on image 1\n"
pts1 = ginput(3)

tform = PiecewiseAffineTransform()
tform.estimate(pts0,pts1)