Code example #1
File: test_warps.py Project: benlongo/scikit-image
def test_slow_warp_nonint_oshape():
    image = np.random.rand(5, 5)

    assert_raises(ValueError, warp, image, lambda xy: xy,
                  output_shape=(13.1, 19.5))

    warp(image, lambda xy: xy, output_shape=(13.0001, 19.9999))
Code example #2
def random_trans_single_output(pic_array):
    # randomly transform the pic_array, which is a numpy nd array
    # flipping
    do_hori_flip = np.random.binomial(1, 0.5)
    if do_hori_flip:
        pic_array = np.fliplr(pic_array)

    do_vert_flip = np.random.binomial(1, 0.5)
    if do_vert_flip:
        pic_array = np.flipud(pic_array)

    # rotation
    pic_array = rotate(pic_array, np.random.random_integers(0, 360),
                       mode='constant', cval=1)

    # scaling
    scale_ratio = log(np.random.uniform(2.5, 4.5))
    affine_tf = tf.AffineTransform(scale=(scale_ratio, scale_ratio))
    pic_array = tf.warp(pic_array, affine_tf, mode='constant', cval=1)

    # translation
    trans_length = np.random.random_integers(-6, 6, 2)
    trans_length = (trans_length[0], trans_length[1])
    affine_tf = tf.AffineTransform(translation=trans_length)
    pic_array = tf.warp(pic_array, affine_tf, mode='constant', cval=1)

    return pic_array
Code example #3
def test():
    img = skimage.img_as_float(data.lena())
    img_size = img.shape[:2]

    trans = get_transform(20,15,1.05, 0.02, img_size)
    img_transformed = transform.warp(img, trans)
    obj_func = lambda x: transform_and_compare(img_transformed, img, x)
    x0 = np.array([0,0,1, 0])
    results = optimize.fmin_bfgs(obj_func, x0)

    transform_estimated = get_simple_transform(results) 
    transform_optimal = transform.AffineTransform(np.linalg.inv(trans._matrix))
    params_optimal = np.concatenate([transform_optimal.translation,
                                    transform_optimal.scale[0:1],
                                    [transform_optimal.rotation]])
    img_registered = transform.warp(img_transformed, 
                                    transform_estimated)
    err_original = mean_sq_diff(img_transformed, img)
    err_optimal = transform_and_compare(img_transformed, img, params_optimal) 
    err_actual = transform_and_compare(img_transformed, img, results) 
    err_relative = err_optimal/err_original
    
    print "Params optimal:", params_optimal
    print "Params estimated:", results
    print "Error without registration:", err_original
    print "Error of optimal registration:", err_optimal 
    print "Error of estimated transformation %f (%.2f %% of intial)" % (err_actual,
                                                            err_relative*100.)

    plt.figure()
    plt.subplot(121)
    plt.imshow(img_transformed)
    plt.subplot(122)
    plt.imshow(img_registered)
Code example #4
    def random_translate_images(self, data, xtrans_r=None, ytrans_r=None):
        if xtrans_r is None:
            if self.xtrans_bound is None:
                xtrans_r = [-5, 5]
            else:
                xtrans_r = self.xtrans_bound
        if ytrans_r is None:
            if self.ytrans_bound is None:
                ytrans_r = [-5, 5]
            else:
                ytrans_r = self.ytrans_bound
        if data.ndim == 3:
            for i in xrange(data.shape[0]):
                xtrans = self.rng.random_integers(xtrans_r[0], xtrans_r[1])
                ytrans = self.rng.random_integers(ytrans_r[0], ytrans_r[1])

                map_args = {
                    "xtranslate": xtrans,
                    "ytranslate": ytrans,
                }
                # warp each frame individually with its own random shift
                data[i] = st.warp(data[i], translate, map_args=map_args)
        else:
            xtrans = self.rng.random_integers(xtrans_r[0], xtrans_r[1])
            ytrans = self.rng.random_integers(ytrans_r[0], ytrans_r[1])

            map_args = {
                "xtranslate": xtrans,
                "ytranslate": ytrans,
            }
            data = st.warp(data, translate, map_args=map_args)
        return data
Code example #5
File: predict_augmented.py Project: Coderx7/Apollo
def generate_transformations(image, fileName):
    MAX_IMAGE_PIXEL = 96

    transformed_images = [image]
    # ======================
    # Scale 1 original image
    # ======================
    similarity_transform = SimilarityTransform(scale=0.75)
    image_scaled = warp(image, similarity_transform, mode='wrap')
    transformed_images.append(image_scaled)
    # sc.misc.imsave(folder + '/' + fileName.split('.')[0] + '/' + fileName.split('.')[0] + '_scale1.jpg', image_scaled)

    # ======================
    # Scale 2 original image
    # ======================
    similarity_transform = SimilarityTransform(scale=1.25)
    image_scaled = warp(image, similarity_transform, mode='wrap')
    transformed_images.append(image_scaled)
    # sc.misc.imsave(folder + '/' + fileName.split('.')[0] + '/' + fileName.split('.')[0]  + '_scale2.jpg', image_scaled)

    # =======================================
    # Rotate image by intervals of 45 degrees
    # =======================================
    result = (generate_tranformations_for_rotated_image(image,fileName, degrees) for degrees in [45, 90, 135, 180, 225, 270, 315])
    transformed_images.extend(flatten(result))
    return transformed_images
Code example #6
File: mask.py Project: johannah/iceview
def find_alpha(base_img, img, model_robust, channel):
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1

    output_shape, corner_min = find_output_shape(base_img, model_robust, channel)
    #print("output_shape", output_shape, corner_min)
    #print(model_robust.scale, model_robust.translation, model_robust.rotation)

    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    base_warped = warp(base_img[:,:,channel], offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    base_color = warp(base_img, offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse

    img_warped = warp(img[:,:,channel], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    base_mask = (base_warped != -1)
    base_warped[~base_mask] = 0

    img_mask = (img_warped != -1)
    #img_warped[~img_mask] = 0

    #convert to rgb
    base_alpha = add_alpha(base_color, base_mask)
    img_alpha = np.dstack((img_color, img_mask))
    #base_alpha = np.dstack((base_color, base_mask))

    #plt.imsave(tmp_base, base_alpha )
    #plt.imsave(tmp_img, img_alpha )
    #cmd = [path_to_enblend, tmp_base, tmp_img, '-o', tmp_out]

    #p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    #output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    #rc = p.returncode
    # remove alpha channel

    if os.path.exists(tmp_out):
        out = imread(tmp_out)[:,:,:3]
    else:
        print("couldnt find out image")
        print(rc, output, err)
        plt.figure()
        plt.imshow(base_alpha)
        plt.figure()#

        plt.imshow(img_alpha)
        plt.show()
        out = base_alpha[:,:,:3]
    #if you don't have enblend, you can use one of these
    #merged_img = simple_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_img = minimum_cost_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_edges = remove_empty_edges(merged_img)
    return out
Code example #7
def gen_data(name):
    reftracker = scio.loadmat('data/images_tracker.00047.mat')
    desttracker = scio.loadmat('data/images_tracker/'+name+'.mat')
    refpos = np.floor(np.mean(reftracker, 0))
    xxc, yyc = np.meshgrid(np.arange(1, 1801, dtype=np.int), np.arange(1, 2001, dtype=np.int))
    #normalize x and y channels
    xxc = (xxc - 600 - refpos[0]) * 1.0 / 600
    yyc = (yyc - 600 - refpos[1]) * 1.0 / 600
    maskimg = Image.open('data/meanask.png')
    maskc = np.array(maskimg, dtype=np.float)
    maskc = np.pad(maskc, (600, 600), 'minimum')
    tform = transform.ProjectiveTransform()
    tform.estimate(reftracker + 600, desttracker + 600)

    img_data = skio.imread('data/images_data/'+name+'.jpg')
    # save org mat
    # warp the normalized coordinate and mask channels (not the image)
    # into the reference frame
    warpedxx = transform.warp(xxc, tform, output_shape=xxc.shape)
    warpedyy = transform.warp(yyc, tform, output_shape=xxc.shape)
    warpedmask = transform.warp(maskc, tform, output_shape=xxc.shape)
    warpedxx = warpedxx[600:1400, 600:1200]
    warpedyy = warpedyy[600:1400, 600:1200]
    warpedmask = warpedmask[600:1400, 600:1200]
    img_h, img_w, _ = img_data.shape
    mat = np.zeros((img_h, img_w, 3), dtype=np.float)
    # subtract the Caffe-style channel means, storing in BGR order
    mat[:, :, 0] = (img_data[:, :, 2] * 1.0 - 104.008) / 255
    mat[:, :, 1] = (img_data[:, :, 1] * 1.0 - 116.669) / 255
    mat[:, :, 2] = (img_data[:, :, 0] * 1.0 - 122.675) / 255
    scio.savemat('portraitFCN_data/' + name + '.mat', {'img':mat})
    mat_plus = np.zeros((img_h, img_w, 6), dtype=np.float)
    mat_plus[:, :, 0:3] = mat
    mat_plus[:, :, 3] = warpedxx
    mat_plus[:, :, 4] = warpedyy
    mat_plus[:, :, 5] = warpedmask
Code example #8
File: test_warps.py Project: Cadair/scikit-image
def test_slow_warp_nonint_oshape():
    image = np.random.rand(5, 5)

    with testing.raises(ValueError):
        warp(image, lambda xy: xy,
             output_shape=(13.1, 19.5))

    warp(image, lambda xy: xy, output_shape=(13.0001, 19.9999))
Code example #9
File: mask.py Project: johannah/iceview
def find_mask(base_name, base_img, img_name, img, model_robust, channel):
    # what type of interpolation
    # 0: nearest-neighbor
    # 1: bi-linear
    warp_order = 1
    output_shape, corner_min = find_output_shape(base_img, model_robust, channel)
    # This in-plane offset is the only necessary transformation for the base image
    offset = SimilarityTransform(translation= -corner_min)
    base_warped = warp(base_img[:,:,channel], offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    base_color = warp(base_img, offset.inverse, order=warp_order,
                      output_shape = output_shape, cval=-1)
    # warp image corners to new position in mosaic
    transform = (model_robust + offset).inverse

    img_warped = warp(img[:,:,channel], transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    img_color = warp(img, transform, order=warp_order,
                      output_shape=output_shape, cval=-1)
    base_mask = (base_warped != -1)
    base_warped[~base_mask] = 0

    img_mask = (img_warped != -1)
    img_warped[~img_mask] = 0
    plt.imsave("img_mask.jpg", img_mask)

    #convert to rgb
    img_alpha = np.dstack((img_color, img_mask))
    base_alpha = np.dstack((base_color, base_mask))

    td = config.tmp_dir
    tmp_base = os.path.join(td, 'tmp_' + '.'.join(base_name.split('.')[:-1]) + '.png')
    tmp_img = os.path.join(td, 'tmp_' + '.'.join(img_name.split('.')[:-1]) + '.png')
    tmp_out = os.path.join(td, 'tmp_out_' + '.'.join(base_name.split('.')[:-1]) + '.png')

    plt.imsave(tmp_base, base_alpha)
    plt.imsave(tmp_img, img_alpha)

    cmd = ['enblend', tmp_base, tmp_img, '-o', tmp_out]

    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate(b"input data that is passed to subprocess' stdin")
    rc = p.returncode
    #if you don't have enblend, you can use one of these
    #merged_img = simple_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_img = minimum_cost_merge(base_warped, img_warped, base_mask, img_mask)
    #merged_edges = remove_empty_edges(merged_img)
    # remove alpha channel
    if os.path.exists(tmp_out):
        out = imread(tmp_out)
        oute = remove_empty_alpha(out)
        os.remove(tmp_base)
        os.remove(tmp_img)
        os.remove(tmp_out)
        return oute[:,:,:3]
    else:
        print("Could not find out", tmp_out, rc)
        raise Exception("failed cmd %s" %cmd)
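Both mask.py examples (#6 above and this one) rely on a find_output_shape helper that is not shown. A minimal sketch under the standard mosaicking recipe of bounding the original and warped image corners (the same logic appears, commented out, in example #39):

import numpy as np

def find_output_shape(base_img, model_robust, channel):
    # push the corners of the base image through the estimated transform
    r, c = base_img[:, :, channel].shape
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])
    warped_corners = model_robust(corners)
    all_corners = np.vstack((warped_corners, corners))
    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)
    # (x, y) extent -> (rows, cols) for warp()'s output_shape
    output_shape = np.ceil((corner_max - corner_min)[::-1])
    return output_shape, corner_min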
Code example #10
File: test_warps.py Project: Rapternmn/scikit-image
def test_warp_clip():
    x = 2 * np.ones((5, 5), dtype=np.double)
    matrix = np.eye(3)

    outx = warp(x, matrix, order=0, clip=False)
    assert_array_almost_equal(x, outx)

    outx = warp(x, matrix, order=0, clip=True)
    assert_array_almost_equal(x / 2, outx)
Code example #11
    def translate_images(self, data, xtrans=0, ytrans=0):
        map_args = {
            "xtrans": xtrans,
            "ytrans": ytrans,
        }
        if data.ndim == 3:
            for i in xrange(data.shape[0]):
                data[i] = st.warp(data[i], translate, map_args=map_args)
        else:
            data = st.warp(data, translate, map_args=map_args)
        return data
Code example #12
File: test_warps.py Project: andreydung/scikit-image
def test_warp_tform():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1
    theta = - np.pi / 2
    tform = SimilarityTransform(scale=1, rotation=theta, translation=(0, 4))

    x90 = warp(x, tform, order=1)
    assert_almost_equal(x90, np.rot90(x))

    x90 = warp(x, tform.inverse, order=1)
    assert_almost_equal(x90, np.rot90(x))
Code example #13
File: test_warps.py Project: andreydung/scikit-image
def test_warp_identity():
    img = img_as_float(rgb2gray(data.astronaut()))
    assert len(img.shape) == 2
    assert np.allclose(img, warp(img, AffineTransform(rotation=0)))
    assert not np.allclose(img, warp(img, AffineTransform(rotation=0.1)))
    rgb_img = np.transpose(np.asarray([img, np.zeros_like(img), img]),
                           (1, 2, 0))
    warped_rgb_img = warp(rgb_img, AffineTransform(rotation=0.1))
    assert np.allclose(rgb_img, warp(rgb_img, AffineTransform(rotation=0)))
    assert not np.allclose(rgb_img, warped_rgb_img)
    # assert no cross-talk between bands
    assert np.all(0 == warped_rgb_img[:, :, 1])
Code example #14
File: test_warps.py Project: aeweiwi/scikit-image
def test_warp():
    x = np.zeros((5, 5), dtype=np.uint8)
    x[2, 2] = 255
    x = img_as_float(x)
    theta = - np.pi / 2
    tform = SimilarityTransform(scale=1, rotation=theta, translation=(0, 4))

    x90 = warp(x, tform, order=1)
    assert_array_almost_equal(x90, np.rot90(x))

    x90 = warp(x, tform.inverse, order=1)
    assert_array_almost_equal(x90, np.rot90(x))
Code example #15
File: image.py Project: histed/PyToolsMH
def align_stack(im, alignNs=r_[0:100], print_status=True, do_plot=False):
    """Realign a stack to an image -- default to mean image from near the start

    Args:
        im: 3d stack of frames to align, shape (frames, rows, cols)
        alignNs: frameNs to average to give the alignment reference image
        print_status: give updates for long calcs to terminal
        do_plot: show a plot with alignment calculations

    Returns:
        aligned stack, same size as input stack, padded with zeros where shifted

    """
    
    # run alignment calculations, saving result in a dataframe
    aligntarg = im[alignNs,:,:].mean(axis=0)
    tL = []
    nfrdo = im.shape[0]
    if print_status: print('Computing offsets ({} frames)... '.format(nfrdo), end='')
    for iF in range(nfrdo):  
        tL.append(feature.register_translation(aligntarg, im[iF,:,:]))

    regDf = pd.DataFrame(tL, columns=('coords','err','phasediff'))
    regDf['row'] = [x[0][0] for x in tL]
    regDf['col'] = [x[0][1] for x in tL]

    if do_plot:
        gs = mpl.gridspec.GridSpec(2,2)
        fig = plt.figure()
        plt.subplot(gs[0,0])
        plt.plot(regDf.err)
        plt.title('translation-independent error')
        plt.ylabel('RMS error')
        plt.subplot(gs[0,1])
        plt.plot(regDf.col)
        plt.plot(regDf.row)
        plt.title('row and col pixel offsets')
        plt.legend(['col','row'])

    # do the shifts
    regim = im.copy()*0
    maxv = im.max()
    if print_status: print('Aligning frames... ', end='')
    for iF in range(nfrdo): #debug range(nframes):
        regim[iF,:,:] = transform.warp(im[iF,:,:]*1.0/maxv, \
                    transform.SimilarityTransform(translation=(-1*regDf.col[iF],-regDf.row[iF]))) * maxv
        if print_status and iF % 500 == 0:
            print('%d (%d,%d)'%(iF,-regDf.col[iF],-regDf.row[iF]), end=' ')
    if print_status: print('Done.')

    return regim
Code example #16
File: test_warps.py Project: aeweiwi/scikit-image
def test_warp_identity():
    lena = img_as_float(rgb2gray(data.lena()))
    assert len(lena.shape) == 2
    assert np.allclose(lena, warp(lena, AffineTransform(rotation=0)))
    assert not np.allclose(lena, warp(lena, AffineTransform(rotation=0.1)))
    rgb_lena = np.transpose(np.asarray([lena, np.zeros_like(lena), lena]),
                            (1, 2, 0))
    warped_rgb_lena = warp(rgb_lena, AffineTransform(rotation=0.1))
    assert np.allclose(rgb_lena, warp(rgb_lena, AffineTransform(rotation=0)))
    assert not np.allclose(rgb_lena, warped_rgb_lena)
    # assert no cross-talk between bands
    assert np.all(0 == warped_rgb_lena[:, :, 1])
Code example #17
File: create_db.py Project: 1nadequacy/kaggle_ds2
def random_transformation(img1, img2):
    shape_x, shape_y = img1.shape
    rot = (random.random() - 0.5) * math.pi / 4
    trans_x = int((random.random() - 0.5) * shape_x / 8)
    trans_y = int((random.random() - 0.5) * shape_y / 8)
    scale = 1. / 1.1 + random.random() * (1.1 - 1. / 1.1)
    pixel_scale = 1. / 1.1 + random.random() * (1.1 - 1. / 1.1)

    trans = transform.SimilarityTransform(
        scale=scale, rotation=rot, translation=(trans_x, trans_y))
    return \
        (pixel_scale * transform.warp(img1.astype(float), trans, mode='nearest')), \
        (transform.warp(img2.astype(float), trans, mode='nearest'))
Code example #18
File: test_warps.py Project: andreydung/scikit-image
def test_warp_matrix():
    x = np.zeros((5, 5), dtype=np.double)
    x[2, 2] = 1
    refx = np.zeros((5, 5), dtype=np.double)
    refx[1, 1] = 1

    matrix = np.array([[1, 0, 1], [0, 1, 1], [0, 0, 1]])

    # _warp_fast
    outx = warp(x, matrix, order=1)
    assert_almost_equal(outx, refx)
    # check for ndimage.map_coordinates
    outx = warp(x, matrix, order=5)
    assert_almost_equal(outx, refx)
Code example #19
File: mosaic.py Project: johannah/iceview
def warp_img(img, transform, output_shape):
    try:
        warped = warp(img, transform, order=1, mode="constant", output_shape=output_shape, clip=True, cval=0)
        return warped
    except Exception as e:
        logging.error("Error warping image %s img shape %s, output shape %s" % (e, img.shape, output_shape))
        return None
Code example #20
File: geometry.py Project: Hu1-Li/sudokuextract
def warp_image_by_corner_points_projection(corner_points, image):
    """Given corner points of a Sudoku, warps original selection to a square image.

    :param corner_points: the four corner points of the Sudoku grid
    :type corner_points: list
    :param image: the image to warp
    :type image: ndarray
    :return: the warped selection, resized to 500x500
    :rtype: ndarray

    """
    # Clarify by storing in named variables.
    top_left, top_right, bottom_left, bottom_right = np.array(corner_points)

    top_edge = np.linalg.norm(top_right - top_left)
    bottom_edge = np.linalg.norm(bottom_right - bottom_left)
    left_edge = np.linalg.norm(top_left - bottom_left)
    right_edge = np.linalg.norm(top_right - bottom_right)

    L = int(np.ceil(max([top_edge, bottom_edge, left_edge, right_edge])))
    src = np.array([top_left, top_right, bottom_left, bottom_right])
    dst = np.array([[0, 0], [L - 1, 0], [0, L - 1], [L - 1, L - 1]])

    tr = ProjectiveTransform()
    tr.estimate(dst, src)
    warped_image = warp(image, tr, output_shape=(L, L))
    out = resize(warped_image, (500, 500))

    return out
Code example #21
    def magnify(self, i, subtract_background=True):
        """Extract a source from the image for the purpose of PSF estimation.

        :param int i: index of the source
        :param bool subtract_background=True: do background subtraction

        :rtype: array
        """
        psf_grid = self.psf_grid
        psf_size = psf_grid.psf_size
        psf_mag = psf_grid.psf_mag

        scale = (1/psf_mag, 1/psf_mag)
        translation = self.pos[i]-(psf_size/2-0.5)/psf_mag
        tf = AffineTransform(scale=scale, translation=translation)
        src = warp(self.data.T, tf, output_shape=(psf_size, psf_size),
                   preserve_range=True, order=3)

        # using affine_transform from scipy
        #matrix = np.array([[1/psf_mag, 0],[0, 1/psf_mag]])
        #offset = self.pos[i]-(psf_size/2-0.5)/psf_mag
        #src = np.transpose(affine_transform(self.data, matrix, offset=offset,
        #    output_shape=(psf_size, psf_size), order=3))

        if subtract_background:
            src -= self.flux0[i]

        return ma.masked_where(src >= self.adusat, src)
Code example #22
File: geometry.py Project: Hu1-Li/sudokuextract
def warp_image_by_interp_borders(edges, image):
    left_edge, top_edge, right_edge, bottom_edge = edges

    left_edge = left_edge[::-1, :]
    bottom_edge = bottom_edge[::-1, :]

    def _mapping_fcn(points):
        map_x = (points[:, 0] / float(points[-1, 0]))
        map_y = (points[:, 1] / float(points[-1, 1]))

        top_mapping = np.array(np.round(map_x * (len(top_edge) - 1)), 'int')
        bottom_mapping = np.array(np.round(map_x * (len(bottom_edge) - 1)), 'int')
        left_mapping = np.array(np.round(map_y * (len(left_edge) - 1)), 'int')
        right_mapping = np.array(np.round(map_y * (len(right_edge) - 1)), 'int')

        map_x = np.array([map_x, map_x]).T
        map_y = np.array([map_y, map_y]).T

        p1s = (left_edge[left_mapping, :] * (1 - map_x)) + (right_edge[right_mapping, :] * map_x)
        p2s = (top_edge[top_mapping, :] * (1 - map_y)) + (bottom_edge[bottom_mapping, :] * map_y)

        return (p1s + p2s) / 2

    d_top_edge = np.linalg.norm(top_edge[0, :] - top_edge[-1, :])
    d_bottom_edge = np.linalg.norm(bottom_edge[0, :] - bottom_edge[-1, :])
    d_left_edge = np.linalg.norm(left_edge[0, :] - left_edge[-1, :])
    d_right_edge = np.linalg.norm(right_edge[0, :] - right_edge[-1, :])

    d = int(np.ceil(max([d_top_edge, d_bottom_edge, d_left_edge, d_right_edge])))
    return warp(image, _mapping_fcn, output_shape=(600, 600))
Code example #23
File: align.py Project: atbd/PythonUtile
    def run3(self):
        """ Cette fonction test des alternatives à SIFT et ORB. Ne fonctionne pas."""
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.BRISK()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(d1,d2)
            
            g1,g2 = [],[]
            for i in matches:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Code example #24
def img_augment(img, translation=0.0, scale=1.0, rotation=0.0, gamma=1.0,
                contrast=1.0, hue=0.0, border_mode='constant'):
    if not (np.all(np.isclose(translation, [0.0, 0.0])) and
            np.isclose(scale, 1.0) and
            np.isclose(rotation, 0.0)):
        img_center = np.array(img.shape[:2]) / 2.0
        scale = (scale, scale)
        transf = transform.SimilarityTransform(translation=-img_center)
        transf += transform.SimilarityTransform(scale=scale, rotation=rotation)
        translation = img_center + translation
        transf += transform.SimilarityTransform(translation=translation)
        img = transform.warp(img, transf, order=3, mode=border_mode)
    if not np.isclose(gamma, 1.0):
        img **= gamma
    colorspace = 'rgb'
    if not np.isclose(contrast, 1.0):
        img = color.convert_colorspace(img, colorspace, 'hsv')
        colorspace = 'hsv'
        img[..., 1:] **= contrast
    if not np.isclose(hue, 0.0):
        img = color.convert_colorspace(img, colorspace, 'hsv')
        colorspace = 'hsv'
        img[..., 0] += hue
        img[img[..., 0] > 1.0, 0] -= 1.0
        img[img[..., 0] < 0.0, 0] += 1.0
    img = color.convert_colorspace(img, colorspace, 'rgb')
    if np.min(img) < 0.0 or np.max(img) > 1.0:
        raise ValueError('Invalid values in output image.')
    return img
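A hypothetical call, assuming `img` is an RGB float image with values in [0, 1] (the function raises ValueError otherwise); the parameter values below are made up for illustration:

img_aug = img_augment(img, translation=(2.0, -3.0), scale=1.05,
                      rotation=0.1, gamma=1.2, contrast=1.1, hue=0.02)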
Code example #25
File: test_warps.py Project: aeweiwi/scikit-image
def test_fast_homography():
    img = rgb2gray(data.lena()).astype(np.uint8)
    img = img[:, :100]

    theta = np.deg2rad(30)
    scale = 0.5
    tx, ty = 50, 50

    H = np.eye(3)
    S = scale * np.sin(theta)
    C = scale * np.cos(theta)

    H[:2, :2] = [[C, -S], [S, C]]
    H[:2, 2] = [tx, ty]

    tform = ProjectiveTransform(H)
    coords = warp_coords(tform.inverse, (img.shape[0], img.shape[1]))

    for order in range(4):
        for mode in ('constant', 'reflect', 'wrap', 'nearest'):
            p0 = map_coordinates(img, coords, mode=mode, order=order)
            p1 = warp(img, tform, mode=mode, order=order)

            # import matplotlib.pyplot as plt
            # f, (ax0, ax1, ax2, ax3) = plt.subplots(1, 4)
            # ax0.imshow(img)
            # ax1.imshow(p0, cmap=plt.cm.gray)
            # ax2.imshow(p1, cmap=plt.cm.gray)
            # ax3.imshow(np.abs(p0 - p1), cmap=plt.cm.gray)
            # plt.show()

            d = np.mean(np.abs(p0 - p1))
            assert d < 0.001
Code example #26
File: mlxview.py Project: terickson/mlxd
def get_overlay(fifo):
    # get the whole FIFO
    ir_raw = fifo.read()
    # trim to 128 bytes
    ir_trimmed = ir_raw[0:128]
    # go all numpy on it
    ir = np.frombuffer(ir_trimmed, np.uint16)
    # set the array shape to the sensor shape (16x4)
    ir = ir.reshape((16, 4))[::-1, ::-1]
    ir = img_as_float(ir)
    # stretch contrast on our heat map
    p2, p98 = np.percentile(ir, (2, 98))
    ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
    # increase even further? (optional)
    # ir = exposure.equalize_hist(ir)

    # turn our array into pretty colors
    cmap = plt.get_cmap('spectral')
    rgba_img = cmap(ir)
    rgb_img = np.delete(rgba_img, 3, 2)

    # align the IR array with the camera
    tform = transform.AffineTransform(
        scale=SCALE, rotation=ROT, translation=OFFSET)
    ir_aligned = transform.warp(
        rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
    # turn it back into a ubyte so it'll display on the preview overlay
    ir_byte = img_as_ubyte(ir_aligned)
    # return buffer
    return np.getbuffer(ir_byte)
Code example #27
File: align.py Project: atbd/PythonUtile
    def run4(self):
        """ Cette fonction recadre les images grâce à SURF et RANSAC, fonctionne bien."""
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(5)), enhance_contrast(normaliser(im2), square(5))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.SURF()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(d1,d2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)
            
            g1,g2 = [],[]
            for i in good:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Code example #28
File: train_synthetic.py Project: rmcgibbo/autogert
def shift(img):
    """Shift a binary image randomly within the frame
    
    Uses a convex hull calculation to make sure it doesn't translate
    the image out of the frame.
    """
    hull = morphology.convex_hull_image(1-img)

    horizontal = np.where(np.sum(hull, axis=0) > 0)[0]
    vertical = np.where(np.sum(hull, axis=1) > 0)[0]

    max_left = -np.min(horizontal)
    max_right = img.shape[1] - np.max(horizontal)
    max_down = -np.min(vertical)
    max_up = img.shape[0] - np.max(vertical)
    
    shift_x = np.random.randint(max_left, max_right)
    shift_y = np.random.randint(max_down, max_up)

    #print "SHIFT", shift_x, shift_y
    
    def shift(xy):
        xy[:, 0] -= shift_x
        xy[:, 1] -= shift_y
        return xy
        
    return np.logical_not(transform.warp(np.logical_not(img), shift))
Code example #29
File: deswirl.py Project: stefanv/aims2014
def swirl(image, center=None, strength=1, radius=100, rotation=0):
    """Perform a swirl transformation.

    Parameters
    ----------
    image : ndarray
        Input image.
    center : (x,y) tuple or (2,) ndarray
        Center coordinate of transformation.
    strength : float
        The amount of swirling applied.
    radius : float
        The extent of the swirl in pixels.  The effect dies out
        rapidly beyond `radius`.
    rotation : float
        Additional rotation applied to the image.

    Returns
    -------
    swirled : ndarray
        Swirled version of the input.

    """

    if center is None:
        center = np.array(image.shape)[:2] / 2

    warp_args = {'center': center,
                 'rotation': rotation,
                 'strength': strength,
                 'radius': radius}

    return transform.warp(image, _swirl_mapping, map_args=warp_args)
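`_swirl_mapping` is not included in the snippet; a sketch consistent with scikit-image's own swirl implementation, which rotates each output coordinate about `center` by an angle that decays exponentially with distance:

import numpy as np

def _swirl_mapping(xy, center, rotation, strength, radius):
    x, y = xy.T
    x0, y0 = center
    rho = np.sqrt((x - x0) ** 2 + (y - y0) ** 2)

    # rescale so the swirl has decayed to roughly 1/1000th at `radius`
    r = radius / 5 * np.log(2)
    theta = rotation + strength * np.exp(-rho / r) \
            + np.arctan2(y - y0, x - x0)

    xy[..., 0] = x0 + rho * np.cos(theta)
    xy[..., 1] = y0 + rho * np.sin(theta)
    return xy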
Code example #30
File: dataset.py Project: ml-lab/cat-generator
    def warp(self, image, matrix):
        """Warp the point's coordinates according to an affine transformation matrix.
        Args:
            image   The image whose dimensions to use.
            matrix  The affine transformation matrix (from scikit-image)
        """
        assert not self.is_normalized

        # This method draws the point as a white pixel on a black image,
        # then warps that image according to the matrix
        # then reads out the new position of the pixel
        # (if it's not found / outside of the image then the coordinates will be unchanged).
        # This is a very wasteful process as many pixels have to be warped instead of just one.
        # There is probably a better method for that, but I don't know it.
        image_pnt = np.zeros((image.get_height(), image.get_width()), dtype=np.uint8)
        image_pnt[self.y, self.x] = 255
        image_pnt_warped = tf.warp(
            image_pnt,
            matrix,
            mode=WARP_KEYPOINTS_MODE,
            cval=WARP_KEYPOINTS_CVAL,
            order=WARP_KEYPOINTS_INTERPOLATION_ORDER,
        )
        maxindex = np.argmax(image_pnt_warped)
        if maxindex == 0 and image_pnt_warped[0, 0] < 0.5:
            # don't change coordinates
            # print("Note: Coordinate (%d, %d) not changed" % (self.y, self.x))
            pass
        else:
            (y, x) = np.unravel_index(maxindex, image_pnt_warped.shape)
            self.y = y
            self.x = x
Code example #31
        faces = face_detector(image)

        if not faces:
            print(f'Warning: There is no face in {x}')
            continue
        else:
            for face_id, current_face in enumerate(faces):
                img_name = f'{x[:-4]}_{face_id + 1}'
                current_fl = landmark_locator(image, current_face)

                affine = compute_transformation_matrix(image,
                                                       current_fl,
                                                       False,
                                                       target_face_scale=1.3,
                                                       inverse=False).params
                aligned_face = warp(image, affine, output_shape=(256, 256, 3))
                io.imsave(os.path.join(save_url, f'{img_name}.png'),
                          img_as_ubyte(aligned_face))

    print("Finish Stage 2 ...\n")

    # Stage 3: Face Restore
    print("Running Stage 3: Face Enhancement")
    stage_3_input_mask = "./"
    stage_3_input_face = stage_2_output_dir
    stage_3_output_dir = os.path.join(output_folder, "stage_3_face_output")
    if not os.path.exists(stage_3_output_dir):
        os.makedirs(stage_3_output_dir)

    single_save_url = os.path.join(stage_3_output_dir, "each_img")
Code example #32
def diffrot_map(smap, time=None, dt: u.s = None, pad=False, **diffrot_kwargs):
    """
    Function to apply solar differential rotation to a sunpy map.

    Parameters
    ----------
    smap : `~sunpy.map`
        Original map that we want to transform.
    time : sunpy-compatible time
        date/time to which the input map will be rotated.
    dt : `~astropy.units.Quantity` or `astropy.time.Time`
        Desired interval between the input map and the returned map.
    pad : `bool`
        Whether to pad the map so that submaps do not lose data.

    Returns
    -------
    diffrot_map : `~sunpy.map`
        A map with the result of applying solar differential rotation to the
        input map.
    """
    # Only this function needs scikit image
    from skimage import transform
    from sunpy.image.util import to_norm, un_norm
    # Import map here for performance reasons.
    import sunpy.map

    if (time is not None) and (dt is not None):
        raise ValueError('Only a time or an interval is accepted')
    elif not (time or dt):
        raise ValueError(
            'Either a time or an interval (`dt=`) needs to be provided')
    elif time:
        new_time = parse_time(time)
        dt = (new_time - smap.date).to(u.s)
    else:
        new_time = smap.date + dt

    # Check for masked maps
    if smap.mask is not None:
        smap_data = np.ma.array(smap.data, mask=smap.mask)
    else:
        smap_data = smap.data

    submap = False
    # Check whether the input is a submap
    if ((2 * smap.rsun_obs >
         smap.top_right_coord.Tx - smap.bottom_left_coord.Tx)
            or (2 * smap.rsun_obs >
                smap.top_right_coord.Ty - smap.bottom_left_coord.Ty)):

        submap = True
        if pad:
            # Calculating the largest distance between the corners and their rotation values
            deltax = deltay = 0
            for corner in product(*product([0 * u.pix], smap.dimensions)):
                corner_world = smap.pixel_to_world(*corner)
                corner_world_rotated = solar_rotate_coordinate(
                    corner_world, new_time, **diffrot_kwargs)
                corner_px_rotated = smap.world_to_pixel(corner_world_rotated)
                dx = np.abs(corner_px_rotated.x - corner[0])
                dy = np.abs(corner_px_rotated.y - corner[1])
                deltax = dx if dx > deltax else deltax
                deltay = dy if dy > deltay else deltay

            deltax = np.int(np.ceil(deltax.value))
            deltay = np.int(np.ceil(deltay.value))
            # Create a new `smap` with the padding around it
            smap_data = np.pad(smap.data, ((deltay, deltay), (deltax, deltax)),
                               'constant',
                               constant_values=0)
            smap_meta = deepcopy(smap.meta)
            smap_meta['naxis2'], smap_meta['naxis1'] = smap_data.shape
            smap_meta['crpix1'] += deltax
            smap_meta['crpix2'] += deltay
            smap = sunpy.map.Map(smap_data, smap_meta)

    warp_args = {'smap': smap, 'dt': dt}
    warp_args.update(diffrot_kwargs)
    # Apply solar differential rotation as a scikit-image warp
    out = transform.warp(to_norm(smap_data),
                         inverse_map=_warp_sun_coordinates,
                         map_args=warp_args)

    # Recover the original intensity range.
    out = un_norm(out, smap.data)

    # Update the meta information with the new date and time, and reference pixel.
    out_meta = deepcopy(smap.meta)
    if out_meta.get('date_obs', False):
        del out_meta['date_obs']
    out_meta['date-obs'] = "{}".format(new_time.strftime('%Y-%m-%dT%H:%M:%S'))

    if submap:
        crval_rotated = solar_rotate_coordinate(smap.reference_coordinate,
                                                new_time, **diffrot_kwargs)
        out_meta['crval1'] = crval_rotated.Tx.value
        out_meta['crval2'] = crval_rotated.Ty.value

    return sunpy.map.Map((out, out_meta))
Code example #33
from skimage import data
from skimage import transform as tf
from skimage.feature import CENSURE
from skimage.color import rgb2gray

import matplotlib.pyplot as plt
import utils.Image_loader as il

img_orig = il.get_sample()
tform = tf.AffineTransform(rotation=0.5, scale=(0.5, 0.5))
img_warp = tf.warp(img_orig, tform)

detector = CENSURE()

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(12, 6))

detector.detect(img_orig)

ax[0].imshow(img_orig, cmap=plt.cm.gray)
ax[0].scatter(detector.keypoints[:, 1],
              detector.keypoints[:, 0],
              2**detector.scales,
              facecolors='none',
              edgecolors='r')
ax[0].set_title("Original Image")

detector.detect(img_warp)

ax[1].imshow(img_warp, cmap=plt.cm.gray)
ax[1].scatter(detector.keypoints[:, 1],
              detector.keypoints[:, 0],
              2**detector.scales,
              facecolors='none',
              edgecolors='r')
Code example #34
def deshear(filename):
    image = io.imread(filename)
    distortion = image.shape[1] - image.shape[0]
    shear = tf.AffineTransform(shear=math.atan(distortion / image.shape[0]))
    return tf.warp(image, shear)[:, distortion:]
Code example #35
def RndTform(img,val):
    Ih,Iw = img[0].shape[:2]
    
    sgn = torch.randint(0,2,(1,)).item() * 2 - 1

    if sgn>0:
        dw = val
        dh = 0
    else:
        dw = 0
        dh = val

    def rd(d): return torch.empty(1).uniform_(-d,d).item()
    def fd(d): return torch.empty(1).uniform_(-dw,d).item()

    # generate a random projective transform
    # adapted from https://navoshta.com/traffic-signs-classification/
    tl_top = rd(dh)
    tl_left = fd(dw)
    bl_bottom = rd(dh)
    bl_left = fd(dw)
    tr_top = rd(dh)
    tr_right = fd( min(Iw * 3/4 - tl_left,dw) )
    br_bottom = rd(dh)
    br_right = fd( min(Iw * 3/4 - bl_left,dw) )

    tform = stf.ProjectiveTransform()
    tform.estimate(np.array((
        (tl_left, tl_top),
        (bl_left, Ih - bl_bottom),
        (Iw - br_right, Ih - br_bottom),
        (Iw - tr_right, tr_top)
    )), np.array((
        [0, 0 ],
        [0, Ih - 1 ],
        [Iw-1, Ih-1 ],
        [Iw-1, 0]
    )))

    # determine shape of output image, to preserve size
    # trick taken from the implementation of skimage.transform.rotate
    corners = np.array([
        [0, 0 ],
        [0, Ih - 1 ],
        [Iw-1, Ih-1 ],
        [Iw-1, 0]
    ])

    corners = tform.inverse(corners)
    minc = corners[:, 0].min()
    minr = corners[:, 1].min()
    maxc = corners[:, 0].max()
    maxr = corners[:, 1].max()
    out_rows = maxr - minr + 1
    out_cols = maxc - minc + 1
    output_shape = np.around((out_rows, out_cols))

    # fit output image in new shape
    translation = (minc, minr)
    tform4 = stf.SimilarityTransform(translation=translation)
    tform = tform4 + tform
    # normalize
    tform.params /= tform.params[2, 2]
    

    ret = []
    for i in range(len(img)):
        img2 = stf.warp(img[i], tform, output_shape=output_shape, cval=1.0)
        img2 = stf.resize(img2, (Ih,Iw), preserve_range=True).astype(np.float32)
        ret.append(img2)


    return ret
Code example #36
print(tform2(coord))
print(tform2.inverse(tform(coord)))

######################################################################
# Image warping
# =============
#
# Geometric transformations can also be used to warp images:

text = data.text()

tform = tf.SimilarityTransform(scale=1,
                               rotation=math.pi / 4,
                               translation=(text.shape[0] / 2, -100))

rotated = tf.warp(text, tform)
back_rotated = tf.warp(rotated, tform.inverse)

fig, ax = plt.subplots(nrows=3)

ax[0].imshow(text, cmap=plt.cm.gray)
ax[1].imshow(rotated, cmap=plt.cm.gray)
ax[2].imshow(back_rotated, cmap=plt.cm.gray)

for a in ax:
    a.axis('off')

plt.tight_layout()

######################################################################
# Parameter estimation
Code example #37
def transform_IR_im_to_vis_coordinate_system(im):
    im = undistort_IR_im(im)
    T = transform.AffineTransform(T_v2IR)
    return transform.warp(im, T, output_shape=(hv, wv))
Code example #38
def main():
    path = '/usr/local/hdd/rita/DL/model_ZT113_150/'
    output_path = '/usr/local/hdd/rita/registration/ransac/brief/mostSimilar/bla/'
    real_path = '/usr/local/hdd/rita/registration/ransac/brief/mostSimilar/original_he/'
    masks = sorted([f for f in os.listdir(path) if f.endswith('_dl.png')])
    he = sorted([f for f in os.listdir(real_path) if f.endswith('.tif')])

    mostSimilar_mask = getMostSimilar(masks, path)
    mostSimilarImg = io.imread(os.path.join(path, mostSimilar_mask))
    mostSimilar = mostSimilar_mask.split(".small")[0]
    mostSimilarRealImg = io.imread(os.path.join(real_path, mostSimilar))
    if False:
        imageio.imwrite(output_path + mostSimilar + '_ransac.png',
                        img_as_ubyte(mostSimilarRealImg))

        out = open(os.path.join(output_path, "eval.txt"), "a")
        out.write("Base: " + mostSimilar + "\n")

        mostSimilarImg = rgb2gray(mostSimilarImg)

        for i in range(len(masks)):
            if not masks[i] == mostSimilar_mask:
                mask_img = rgb2gray(io.imread(os.path.join(path, masks[i])))

                real_name = masks[i].split(".small")[0]
                real_img = io.imread(os.path.join(real_path, real_name))

                rescale_trans = np.amin([
                    real_img.shape[0] / mask_img.shape[0],
                    real_img.shape[1] / mask_img.shape[1]
                ])

                trans = start_ransac(img1=mostSimilarImg,
                                     img2=mask_img,
                                     brief=True,
                                     common_factor=0.25)
                rescaled_transform = rescale_transform_matrix(
                    trans, rescale_trans)

                reg = warp(real_img, np.linalg.inv(rescaled_transform))
                imageio.imwrite(output_path + real_name + '_ransac.png',
                                img_as_ubyte(reg))

                out.write(
                    real_name + "\t" +
                    str(difference(mostSimilarImg, mask_img)) + "\t" + str(
                        difference(
                            mostSimilarImg,
                            rgb2gray(warp(mask_img, np.linalg.inv(trans))))) +
                    "\n")

        out.close()
    chan_output_path = '/usr/local/hdd/rita/registration/ransac/brief/mostSimilar/channels_reg/'
    chan_path = '/usr/local/hdd/rita/registration/ransac/brief/mostSimilar/channels/'
    light = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_light.jpg')])
    he_reg = sorted(
        [f for f in os.listdir(output_path) if f.endswith('_ransac.png')])

    cha0 = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_ch00.tif')])
    cha2 = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_ch02.tif')])
    cha3 = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_ch03.tif')])
    cha4 = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_ch04.tif')])
    cha5 = sorted(
        [f for f in os.listdir(chan_path) if f.endswith('_ch05.tif')])

    for i in range(len(he_reg)):
        light_img = io.imread(os.path.join(chan_path, light[i]))
        he_img = io.imread(os.path.join(output_path, he_reg[i]))
        print(he_img.shape)
        if light_img.shape[0] * light_img.shape[1] > he_img.shape[
                0] * he_img.shape[1]:
            first_rescale = np.amin([
                light_img.shape[0] / he_img.shape[0],
                light_img.shape[1] / he_img.shape[1]
            ])
            light_resc = transform.rescale(light_img,
                                           1 / first_rescale,
                                           multichannel=False)
            print(light_img.shape)
            print(light_resc.shape)
            common_factor = 1 / np.amax([
                light_resc.shape[0] / 150, light_resc.shape[1] / 150,
                he_img.shape[0] / 150, he_img.shape[1] / 150
            ])
            trans = start_ransac(img1=rgb2gray(he_img),
                                 img2=rgb2gray(light_resc),
                                 brief=False,
                                 common_factor=common_factor)

            rescaled_transform = rescale_transform_matrix(
                trans, 1 / first_rescale)
            reg = warp(light_img, np.linalg.inv(rescaled_transform))

            ch0 = io.imread(os.path.join(chan_path, cha0[i]))
            ch2 = io.imread(os.path.join(chan_path, cha2[i]))
            ch3 = io.imread(os.path.join(chan_path, cha3[i]))
            ch4 = io.imread(os.path.join(chan_path, cha4[i]))
            ch5 = io.imread(os.path.join(chan_path, cha5[i]))

            reg_ch0 = warp(ch0, np.linalg.inv(rescaled_transform))
            reg_ch2 = warp(ch2, np.linalg.inv(rescaled_transform))
            reg_ch3 = warp(ch3, np.linalg.inv(rescaled_transform))
            reg_ch4 = warp(ch4, np.linalg.inv(rescaled_transform))
            reg_ch5 = warp(ch5, np.linalg.inv(rescaled_transform))

            imageio.imwrite(chan_output_path + cha0[i] + '_ransac.png',
                            img_as_ubyte(reg_ch0))
            imageio.imwrite(chan_output_path + cha2[i] + '_ransac.png',
                            img_as_ubyte(reg_ch2))
            imageio.imwrite(chan_output_path + cha3[i] + '_ransac.png',
                            img_as_ubyte(reg_ch3))
            imageio.imwrite(chan_output_path + cha4[i] + '_ransac.png',
                            img_as_ubyte(reg_ch4))
            imageio.imwrite(chan_output_path + cha5[i] + '_ransac.png',
                            img_as_ubyte(reg_ch5))

            imageio.imwrite(chan_output_path + light[i] + '_ransac.png',
                            img_as_ubyte(reg))
        else:
            first_rescale = np.amin([
                he_img.shape[0] / light_img.shape[0],
                he_img.shape[1] / light_img.shape[1]
            ])
            he_resc = transform.rescale(he_img,
                                        1 / first_rescale,
                                        multichannel=False)

            common_factor = 1 / np.amax([
                light_img.shape[0] / 150, light_img.shape[1] / 150,
                he_resc.shape[0] / 150, he_resc.shape[1] / 150
            ])
            trans = start_ransac(img1=rgb2gray(he_resc),
                                 img2=rgb2gray(light_img),
                                 brief=False,
                                 common_factor=common_factor)

            reg = warp(light_img, np.linalg.inv(trans))
            imageio.imwrite(chan_output_path + light[i] + '_ransac.png',
                            img_as_ubyte(reg))
Code example #39
def start_ransac(img1, img2, brief, common_factor=0.25):

    #https://www.researchgate.net/publication/264197576_scikit-image_Image_processing_in_Python
    img1 = transform.rescale(img1, common_factor, multichannel=False)
    img2 = transform.rescale(img2, common_factor, multichannel=False)

    print(img1.shape)
    print(img2.shape)

    if brief:
        #BRIEF
        keypoints1 = corner_peaks(corner_harris(img1), min_distance=5)
        keypoints2 = corner_peaks(corner_harris(img2), min_distance=5)

        extractor = BRIEF()

        extractor.extract(img1, keypoints1)
        keypoints1 = keypoints1[extractor.mask]
        descriptors1 = extractor.descriptors

        extractor.extract(img2, keypoints2)
        keypoints2 = keypoints2[extractor.mask]
        descriptors2 = extractor.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)
    else:
        #ORB
        orb = ORB(n_keypoints=1000, fast_threshold=0.05)

        orb.detect_and_extract(img1)
        keypoints1 = orb.keypoints
        descriptors1 = orb.descriptors

        orb.detect_and_extract(img2)
        keypoints2 = orb.keypoints
        descriptors2 = orb.descriptors

        matches12 = match_descriptors(descriptors1,
                                      descriptors2,
                                      cross_check=True)

    src = keypoints2[matches12[:, 1]][:, ::-1]
    dst = keypoints1[matches12[:, 0]][:, ::-1]

    model_robust, inliers = \
        ransac((src, dst), transform.SimilarityTransform, min_samples=4, residual_threshold=2)

    #r, c = img2.shape[:2]

    #corners = np.array([[0, 0],
    #    [0, r],
    #    [c, 0],
    #[c,r]])

    #warped_corners = model_robust(corners)
    #all_corners = np.vstack((warped_corners, corners))

    #corner_min = np.min(all_corners, axis=0)
    #corner_max = np.max(all_corners, axis=0)

    #output_shape = (corner_max - corner_min)
    #output_shape = np.ceil(output_shape[::-1])

    #offset = transform.SimilarityTransform(translation=-corner_min)

    #Not really cool rescaling
    #offset_tmatrix =  np.copy(offset.params)
    #offset_tmatrix[0, 2] = offset_tmatrix[0, 2]/common_factor
    #offset_tmatrix[0, 2] = offset_tmatrix[0, 2]/rescale_trans
    #offset_tmatrix[1, 2] = offset_tmatrix[1, 2]/common_factor
    #offset_tmatrix[1, 2] = offset_tmatrix[1, 2]/rescale_trans

    model_robust_tmatrix = np.copy(model_robust.params)
    model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2] / common_factor
    #model_robust_tmatrix[0, 2] = model_robust_tmatrix[0, 2]/rescale_trans
    model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2] / common_factor
    #model_robust_tmatrix[1, 2] = model_robust_tmatrix[1, 2]/rescale_trans

    #model_robust_offset_tmatrix = np.copy((model_robust+offset).params)
    #model_robust_offset_tmatrix[0, 2] = offset_tmatrix[0, 2] + model_robust_tmatrix[0, 2]
    #model_robust_offset_tmatrix[1, 2] = offset_tmatrix[1, 2] + model_robust_tmatrix[1, 2]

    #factor2 = 1.05
    #img3_ = warp(img3, np.linalg.inv(offset_tmatrix), output_shape=(img3.shape[0]*factor2, img3.shape[1]*factor2))
    #img4_ = warp(img4, np.linalg.inv(model_robust_offset_tmatrix), output_shape=(img3.shape[0]*factor2, img3.shape[1]*factor2))

    img1_ = img1  #= warp(img1, offset.inverse, output_shape=output_shape, cval=-1)
    img2_ = warp(
        img2, model_robust.inverse, cval=-1
    )  #(model_robust+offset).inverse, output_shape=output_shape, cval=-1)

    fig = plt.figure(constrained_layout=True)
    gs = fig.add_gridspec(3, 2)
    f_ax1 = fig.add_subplot(gs[0, :])
    plot_matches(f_ax1, img1, img2, keypoints1, keypoints2, matches12)
    f_ax1.axis('off')
    #f_ax1.set_title(filename1+" vs. "+filename2)
    f_ax2 = fig.add_subplot(gs[1, 0])
    f_ax2.imshow(img1)
    f_ax2.axis('off')
    f_ax2.set_title("img1")
    f_ax3 = fig.add_subplot(gs[1, 1])
    f_ax3.imshow(img1_)
    f_ax3.axis('off')
    f_ax3.set_title("img1_")
    #f_ax4 = fig.add_subplot(gs[1, 2])
    #f_ax4.imshow(img3_)
    #f_ax4.axis('off')
    #f_ax4.set_title("img3_")
    f_ax5 = fig.add_subplot(gs[2, 0])
    f_ax5.imshow(img2)
    f_ax5.axis('off')
    f_ax5.set_title("img2")
    f_ax6 = fig.add_subplot(gs[2, 1])
    f_ax6.imshow(img2_)
    f_ax6.axis('off')
    f_ax6.set_title("img2_")
    #f_ax7 = fig.add_subplot(gs[2, 2])
    #f_ax7.imshow(img4_)
    #f_ax7.axis('off')
    #f_ax7.set_title("img4_")
    plt.show()

    return model_robust_tmatrix
Code example #40
def mix_lesions(lesion_bg, lesion_fg, mask_bg, mask_fg, gauss_sigma=0):
    height, width = lesion_bg.shape[:2]

    # Histogram matching
    for i in range(3):
        lesion_bg_masked = ma.array(lesion_bg[..., i], mask=~mask_bg)
        lesion_fg[..., i] = _histogram_matching(lesion_fg[..., i],
                                                lesion_bg_masked)

    rotation = randint(0, 90)
    lesion_fg = rotate(lesion_fg,
                       rotation,
                       mode='reflect',
                       preserve_range=True).astype('uint8')
    mask_fg = rotate(mask_fg,
                     rotation,
                     mode='constant',
                     cval=0,
                     preserve_range=True).astype('uint8')

    cm_fg = center_of_mass(mask_fg)
    cm_bg = center_of_mass(mask_bg)

    tf_ = SimilarityTransform(scale=1,
                              rotation=0,
                              translation=(cm_fg[1] - cm_bg[1],
                                           cm_fg[0] - cm_bg[0]))

    lesion_fg = warp(lesion_fg, tf_, mode='constant',
                     preserve_range=True).astype('uint8')
    mask_fg = warp(mask_fg, tf_, mode='constant', cval=0,
                   preserve_range=True).astype('uint8')
    cm_fg = center_of_mass(mask_fg)

    # Cut mask
    cut_mask = np.zeros(mask_fg.shape)
    cut_mask[cut_mask.shape[0] // 2:, :] = 255
    cut_mask = rotate(cut_mask,
                      randint(0, 90),
                      mode='reflect',
                      preserve_range=True).astype('uint8')
    tf_cm = SimilarityTransform(scale=1,
                                rotation=0,
                                translation=(width // 2 - cm_bg[1],
                                             height // 2 - cm_bg[0]))
    cut_mask = warp(cut_mask,
                    tf_cm,
                    mode='constant',
                    cval=0,
                    preserve_range=True).astype('uint8')
    mask_fg = np.where(np.logical_and(mask_fg, cut_mask), 255, 0)

    # Calculate mask bounding box
    coords = np.argwhere(mask_fg == 255)
    y0, x0 = coords.min(axis=0)
    y1, x1 = coords.max(axis=0) + 1

    # Convert mask to 3 channels
    mask_fg = np.dstack((mask_fg, mask_fg, mask_fg))
    # Convert it to float
    mask_fg = mask_fg.astype('float')
    # And normalize it to 0.0~1.0
    mask_fg *= (1.0 / 255.0)

    # Apply Gaussian Blur to the mask
    mask_fg = gaussian(mask_fg,
                       sigma=gauss_sigma,
                       multichannel=True,
                       preserve_range=True)

    out = np.copy(lesion_bg)

    out_ = (lesion_bg * (1.0 - mask_fg) + lesion_fg * mask_fg).astype('uint8')
    out = np.where(mask_fg == 0, lesion_bg, out_)

    return out
Code example #41
def test_const_cval_out_of_range():
    img = np.random.randn(100, 100)
    cval = -10
    warped = warp(img, AffineTransform(translation=(10, 10)), cval=cval)
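    # a (10, 10) translation leaves two 100x10 bands of cval that overlap
    # in a 10x10 corner: 2 * 100 * 10 - 10 * 10 = 1900 pixels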
    assert np.sum(warped == cval) == (2 * 100 * 10 - 10 * 10)
Code example #42
    def findGrains(self, minGrainSize=10):
        # Check a EBSD map is linked
        self.checkEbsdLinked()

        # Initialise the grain map
        self.grains = np.copy(self.boundaries)

        self.grainList = []

        # List of points where no grain has been set yet
        unknownPoints = np.where(self.grains == 0)
        numPoints = unknownPoints[0].shape[0]
        totalPoints = numPoints
        # Start counter for grains
        grainIndex = 1

        # Loop until all points (except boundaries) have been assigned
        # to a grain or ignored
        while numPoints > 0:
            # report progress
            yield 1. - numPoints / totalPoints

            # Flood fill first unknown point and return grain object
            currentGrain = self.floodFill(unknownPoints[1][0],
                                          unknownPoints[0][0], grainIndex)

            grainSize = len(currentGrain)
            if grainSize < minGrainSize:
                # if grain size less than minimum, ignore grain and set
                # values in grain map to -2
                for coord in currentGrain.coordList:
                    self.grains[coord[1], coord[0]] = -2
            else:
                # add grain and size to lists and increment grain label
                self.grainList.append(currentGrain)
                grainIndex += 1

            # update unknown points
            unknownPoints = np.where(self.grains == 0)
            numPoints = unknownPoints[0].shape[0]

        # Now link grains to those in ebsd Map
        # Warp DIC grain map to EBSD frame
        dicGrains = self.grains
        warpedDicGrains = tf.warp(
            np.ascontiguousarray(dicGrains.astype(float)),
            self.ebsdTransformInv,
            output_shape=(self.ebsdMap.yDim, self.ebsdMap.xDim),
            order=0).astype(int)

        # Initialise list to store ID of corresponding grain in EBSD map.
        # Also stored in grain objects
        self.ebsdGrainIds = []

        for i in range(len(self.grainList)):
            # Find grain by masking the native ebsd grain image with
            # selected grain from the warped dic grain image. The modal
            # value is the EBSD grain label.
            modeId, _ = mode(self.ebsdMap.grains[warpedDicGrains == i + 1])

            self.ebsdGrainIds.append(modeId[0] - 1)
            self.grainList[i].ebsdGrainId = modeId[0] - 1
            self.grainList[i].ebsdGrain = self.ebsdMap.grainList[modeId[0] - 1]
            self.grainList[i].ebsdMap = self.ebsdMap
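The order=0 (nearest-neighbour) warp above is what keeps the integer grain labels valid after transformation; a minimal sketch with a hypothetical label array:

import numpy as np
from scipy.stats import mode
from skimage import transform as tf

labels = np.zeros((50, 50))
labels[10:30, 10:30] = 3  # a single labelled grain
shift = tf.SimilarityTransform(translation=(5, 5))
warped = tf.warp(labels, shift, order=0, preserve_range=True).astype(int)
modal, _ = mode(warped[warped > 0], axis=None)  # 3: no interpolation occurred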
Code Example #43
File: warp_text.py Project: tuxkp/ay250
from skimage import data, transform

import numpy as np
import matplotlib.pyplot as plt

image = data.text()

plt.imshow(image, cmap=plt.cm.gray)

target = np.array(plt.ginput(4))
source = np.array([(0, 0), (0, 50), (300, 50), (300, 0)])

plt.close()

pt = transform.ProjectiveTransform()
pt.estimate(source, target)

warped = transform.warp(image, pt, output_shape=(50, 300))

f, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(image, cmap=plt.cm.gray)
ax1.imshow(warped, cmap=plt.cm.gray)
plt.show()
Code Example #44
        mnist = MNIST()
        np.random.seed(1234)
        k = 0
        params = []
        for i in range(30):
            print('it ' + str(i))

            tfparam = np.array([[1.1, 0., 0.], [0., 1.1, 0.], [0., 0., 1.]])
            tfseed = (np.random.rand(3, 3) - 0.5) * np.array([[0.2, 0.2, 6], [0.2, 0.2, 6], [0, 0, 0]])
            print(tfseed)
            tfparam += tfseed
            tform = transform.AffineTransform(tfparam)
            print(tfparam)

            # variant_mnist = [transform.warp(x + 0.5, tform) for x in mnist.test_data[:10, :, :, :]]
            variant_mnist = [transform.warp(x + 0.5, tform) for x in mnist.test_data]
            variant_mnist = np.reshape(variant_mnist, (10000, 28, 28, 1)) - 0.5

            mnist_test_result = model.model.predict(variant_mnist)
            amr = np.argmax(mnist_test_result, axis=1)
            aml = np.argmax(mnist.test_labels, axis=1)
            wrong_indices = (amr != aml)
            right_indices = ~wrong_indices
            acc = (1 - np.sum(wrong_indices) / aml.shape[0])
            print("acc = %f" % acc)
            if acc > 0.95:
                print('save #%d' % i)
                params.append(tfparam)
                np.save('exp_affine_in_%d.npy' % k, mnist_test_result)
                print(mnist_test_result.shape)
                k += 1
Code Example #45
File: stitch.py Project: positronlee/ML_DeepCT
def merge_full_image(data_left,
                     data_right,
                     left_index,
                     right_index,
                     model_robust,
                     verbose=1,
                     hist_match=1):
    """
	merge the whole CT slice
	Parameters:
	--------------------------------------------
	data_left: to be stitched left CT volumn
	data_right: to be stitched right CT volumn
	left_index: index of CT 
	
	"""
    # get the slice of the left image and right image
    image_left = np.squeeze(data_left[:, :, left_index])
    image_right = np.squeeze(data_right[:, 1:, right_index])

    # match histogram
    if hist_match:
        image_left = histogram_matching(image_left, image_right, verbose=0)

    # get the size of the image
    size_right = np.shape(image_right)
    size_left = np.shape(image_left)

    #############################################################################
    ### calculate the output shape of the stitched image
    #############################################################################
    # number of columns for the stitched image
    n_col = np.add(size_right, size_left)[1]

    #     image0 = image_left
    #     image1 = image_right

    r, c = image_right.shape[:2]
    c = n_col
    corners = np.array([[0, 0], [0, r], [c, 0], [c, r]])

    warped_corners = model_robust(corners)  # also include rotation
    all_corners = np.vstack((warped_corners, corners))

    corner_min = np.min(all_corners, axis=0)
    corner_max = np.max(all_corners, axis=0)

    output_shape = (corner_max - corner_min)
    if warped_corners[0][0] < 0:
        offset_box = -warped_corners[0][0] + 102
    else:
        offset_box = warped_corners[0][0]
    output_shape[0] = n_col - offset_box
    output_shape = np.ceil(output_shape[::-1])
    #############################################################################
    ### merge the two image by shifting them to the correct positions
    #############################################################################
    offset = EuclideanTransform(translation=(0, 0))  # move the left image

    image0_ = warp(image_left,
                   offset.inverse,
                   output_shape=output_shape,
                   cval=-1)
    # pad -1 to the left image to the same shape as output_shape

    offset = EuclideanTransform(translation=(size_left[1] - 99, 0))
    # move the right image

    image1_ = warp(image_right, (model_robust + offset).inverse,
                   output_shape=output_shape,
                   cval=-1)
    # use the image registration model (model_robust) plus the offset translation

    image_merge = image1_ + image0_
    image_merge[np.where(
        image_merge > 0)] = (image_merge[np.where(image_merge > 0)] -
                             2) / 2  # average the overlap part
    ##############################################################################
    if verbose:
        plt.figure(figsize=(10, 5))
        plt.subplot(121)
        plt.imshow(image_left, cmap='gray')
        plt.title('left image', fontsize=20)
        plt.axis('off')

        plt.subplot(122)
        plt.imshow(image_right, cmap='gray')
        plt.title('right image', fontsize=20)
        plt.axis('off')

        plt.figure(figsize=(10, 5))
        plt.imshow(image_merge[10:-10, :], cmap='gray')
        plt.title('stitched image', fontsize=20)
        plt.axis('off')

    return (image_merge)
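The `(model_robust + offset)` composition above relies on skimage transform addition; a minimal sketch with hypothetical transforms:

from skimage.transform import EuclideanTransform

shift = EuclideanTransform(translation=(100, 0))
rot = EuclideanTransform(rotation=0.05)
composite = rot + shift  # applies `rot` first, then `shift`
inverse_map = composite.inverse  # what warp() expects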
Code Example #46
def rotate(img, rot_amt):
    rot = tf.AffineTransform(rotation=rot_amt)
    img = tf.warp(img, inverse_map=rot, mode='reflect')
    return img
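Note that AffineTransform takes its rotation angle in radians, so a caller converting from degrees might look like this (assuming `tf` is bound to skimage.transform, as the helper above requires):

import numpy as np
from skimage import data

rotated = rotate(data.camera(), np.deg2rad(15))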
Code Example #47
trans_names = ['similarity', 'affine', 'piecewise-affine',
               'projective']  # transform list

fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))

for tform_type, axis in zip(trans_names,
                            axes.flat):  # looping through transforms

    # build the model
    tform = transform.estimate_transform(tform_type, np.array(src),
                                         np.array(dst))

    # use the model
    raw_corrected_Z = transform.warp(moving,
                                     inverse_map=tform.inverse,
                                     output_shape=np.shape(moving))

    # one way to do correlations
    corr = stats.pearsonr(np.reshape(fixed, [1024 * 1024, 1]),
                          np.reshape(raw_corrected_Z, [1024 * 1024, 1]))[0][0]

    # visualize the transformation
    axis.set_title(tform_type + ' - Pearson corr: ' + str(np.round(corr, 3)))
    axis.imshow(raw_corrected_Z)

fig.suptitle('Different transforms applied to the images', y=1.03)
fig.tight_layout()
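For reference, the same correlation can be computed with plain numpy, assuming the `fixed` and `raw_corrected_Z` arrays from the loop above:

corr_np = np.corrcoef(fixed.ravel(), raw_corrected_Z.ravel())[0, 1]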

def _calculate_transform_matrix(frame_RGB, frame_thermal,
                               thermal_canny_percentage = 4,
                               rgb_canny_percentage = 4,
                               division_depth = 6,
                               desired_thermal_scale = 1,
                               denoise_weight_rgb = 0.3, denoise_weight_thermal = 0.1,
                               degree = 2,
                               plot = False):
    '''
    Calculate the second degree polynomial transformation matrix to map the
    thermal frame on the RGB frame.

    Parameters
    ----------
    frame_RGB : ndarray
        RGB frame without alpha.
    frame_thermal : ndarray
        2D Thermal frame resized to have the same size with RGB frame.
    thermal_canny_percentage : int, optional
        Coverage of the edges for the canny output. Recommended: 2 to 6, default: 4.
    rgb_canny_percentage : int, optional
        Coverage of the edges for the canny output. Recommended: 3 to 8, default: 4.
    division_depth : int, optional
        Maximum region count for the vertical division. Needs to be chosen proportionally
        with the frames' quality and information density. Smallest division should
        not have a smaller width than the expected shift, but the outliers are mostly
        handled. Default: 6.

    Returns
    -------
    ndarray
        2x6 second degree polynomial transformation matrix.
    '''

    rgb_edge = _canny_with_TV(frame_RGB, denoise_weight_rgb, rgb_canny_percentage)
    therm_edge = _canny_with_TV(frame_thermal, denoise_weight_thermal, thermal_canny_percentage)
    
    orig_width, orig_height = rgb_edge.shape
    half_width, half_height = int(orig_width/2), int(orig_height/2)
    
    rgb_proc = np.zeros((orig_width*2, orig_height*2))
    rgb_proc[half_width:half_width*3,half_height:half_height*3] = rgb_edge
    therm_proc = np.zeros((orig_width*2, orig_height*2))
    therm_proc[half_width:half_width*3,half_height:half_height*3] = therm_edge
    max_width, max_height = rgb_proc.shape[:2]
    
    # Divide image into vertical areas and save the centers before a possible shift.
    points_x = []
    points_y = []
    weights = []
    for region_count in (np.logspace(0,division_depth,division_depth, base = 2)).astype(int):

        # Determine division limits
        region_divisions_with_zero = np.linspace(0, max_width, num = region_count,
                                                 endpoint = False, dtype = int)
        region_divisions = region_divisions_with_zero[1:]
        all_region_bounds = np.append(region_divisions_with_zero, max_width)
        # Divide the frames into the regions
        lum_regions = np.hsplit(rgb_proc,region_divisions)
        therm_regions = np.hsplit(therm_proc,region_divisions)
        
        region_divisions_with_zero = np.insert(region_divisions, 0, 0)
        # Calculate the shifts for each region and save the points. Weight of a point
        # is proportional with its size ( thus, amount of information) and its
        # closeness to the center of the image ( which is the expected location
        # of the baby)
        for ind, (lumreg, thermreg) in enumerate(zip(lum_regions, therm_regions)):
            
            shifts, error, _ = feature.register_translation(thermreg.astype(int), lumreg.astype(int), 10)
            min_h, min_w = shifts
    
            reg_width = all_region_bounds[ind+1] - region_divisions_with_zero[ind]
            point_y = max_height/2-min_h
            point_x = region_divisions_with_zero[ind] + reg_width/2 - min_w
            
            points_y.append(point_y)
            points_x.append(point_x)

            sum_t = np.count_nonzero(thermreg)
            sum_r = np.count_nonzero(lumreg)
            try:
                weights.append(sum_t*sum_r/(sum_t+sum_r))
            except ZeroDivisionError:
                weights.append(0)
#           weights.append(reg_width*max_height)
#            weights.append( (division_depth - region_count + 1) * abs(point_x-(max_width/2))/max_width )
    
    # Remove the points that are certainly miscalculations: First filter by
    # the location of the cameras, then remove outliers (i.e. points more than 1 iqr away 
    # from the closest percentile.)
    
    clean_mask_1 = np.array([y > max_height * 11 / 20 for y in points_y])
    semiclean_points_x = np.array(points_x)[clean_mask_1]
    semiclean_points_y = np.array(points_y)[clean_mask_1]
    semiclean_weights = np.array(weights)[clean_mask_1]
    
    from collections import Counter
    #weighted percentiles
    q1, q3 = np.percentile(list(Counter(dict(zip(semiclean_points_y, semiclean_weights.astype(int)))).elements()), [25 ,75])
    #q1, q3 = np.percentile(semiclean_points_y, [25 ,75])
    iqr_y = (q3-q1)*1
    clean_mask_2 = np.array([q1 - iqr_y < y < q3 + iqr_y
                             for y in semiclean_points_y])
    clean_points_x = np.array(semiclean_points_x)[clean_mask_2]
    clean_points_y = np.array(semiclean_points_y)[clean_mask_2]
    clean_weights = np.array(semiclean_weights)[clean_mask_2]

    # Create the polynomial features and fit the regression.
    poly = PolynomialFeatures(degree=degree)
    X_t = poly.fit_transform(np.array(clean_points_x).reshape((-1,1)))
    
    clf = LinearRegression()
    clf.fit(X_t, clean_points_y, sample_weight = clean_weights)
    
    points = np.linspace(0,max_width,10)
    data = poly.fit_transform(points.reshape((-1,1)))
    line = clf.predict(data)
    
    # Create a grid of values from the regression to estimate the transformation matrix.
    x_points_grid = np.array([points , points, points, points, points])
    y_points_grid = np.array([line-20, line-10, line, line+10, line+20])
    src = np.array([(x-half_width,y-half_height) for x,y in zip(x_points_grid.flatten(), y_points_grid.flatten())])
    cent = max_height/2
    y_points_truegrid = np.broadcast_to(np.array([[cent-20], [cent-10], [cent], [cent+10], [cent+20]]), y_points_grid.shape)
    dest = np.array([(x-half_width,y-half_height) for x,y in zip(x_points_grid.flatten(), y_points_truegrid.flatten())])
    
    trans = transform.PolynomialTransform()
    trans.estimate(src*desired_thermal_scale,dest*desired_thermal_scale,degree)
    
    if plot:
        
        import cv2
        fig, ax = plt.subplots(nrows=1, ncols=5, figsize = (20,5))
        
        ax[0].imshow(frame_thermal)
        ax[0].set_title('thermal frame. Initial res: 80x60')
        ax[1].imshow(frame_RGB)
        ax[1].set_title('RGB frame. Initial res: 640x480')
        ax[2].imshow(frame_thermal)
        ax[2].scatter(points_x, points_y,color = 'r')
        ax[2].scatter(clean_points_x, clean_points_y,color = 'g')
        ax[2].plot(points, line,scalex = False, scaley= False)
        ax[2].set_xlim(0,max_height)
        ax[2].set_ylim(max_width,0)
        ax[2].set_title('Corr. points and the quadratic fit. Red: outliers.')
        ax[3].imshow(therm_proc)
        ax[3].set_title('Edges of thermal frame')
        
        warped = transform.warp(frame_thermal,trans)
        
        scaled_aligned_thermal = cv2.applyColorMap((warped*256).astype('uint8'), cv2.COLORMAP_JET)[...,::-1]
        
        overlay = cv2.addWeighted(scaled_aligned_thermal, 0.3, (frame_RGB).astype('uint8'), 0.7, 0)
        ax[4].imshow(overlay)
        ax[4].set_title('final overlay')
    
    return trans.params
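A hypothetical usage of the returned parameters (the frame names are placeholders): rebuild the transform object and warp the thermal frame onto the RGB frame.

from skimage import transform

params = _calculate_transform_matrix(frame_RGB, frame_thermal)
tform = transform.PolynomialTransform(params)
aligned_thermal = transform.warp(frame_thermal, tform)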
Code Example #49
# --- Convert the images to gray level: color is not supported.
image0 = rgb2gray(image0)
image1 = rgb2gray(image1)

# --- Compute the optical flow
v, u = optical_flow_tvl1(image0, image1)

# --- Use the estimated optical flow for registration

nr, nc = image0.shape

row_coords, col_coords = np.meshgrid(np.arange(nr), np.arange(nc),
                                     indexing='ij')

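# warp accepts a coordinate array as the inverse map: for every output pixel
# it gives the input location to sample, here each pixel displaced by the flow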
image1_warp = warp(image1, np.array([row_coords + v, col_coords + u]),
                   mode='edge')

# build an RGB image with the unregistered sequence
seq_im = np.zeros((nr, nc, 3))
seq_im[..., 0] = image1
seq_im[..., 1] = image0
seq_im[..., 2] = image0

# build an RGB image with the registered sequence
reg_im = np.zeros((nr, nc, 3))
reg_im[..., 0] = image1_warp
reg_im[..., 1] = image0
reg_im[..., 2] = image0

# build an RGB image with the target sequence
target_im = np.zeros((nr, nc, 3))
Code Example #50
def shear(img, shear_amt):
    shear = tf.AffineTransform(shear=shear_amt)
    img = tf.warp(img, inverse_map=shear, mode='reflect')
    return img
Code Example #51
File: meshwarp.py Project: marvelikov/face_lls
def mainMeshWarp():
    '''1. read image and load new landmarks (nod)'''
    image = cv2.imread('../../github/vrn-07231340/examples/trump-12.jpg')
    imageTmp = copy.deepcopy(image)
    rows, cols = image.shape[0], image.shape[1]

    originLandmark2D = getLandmark2D(image)

    targetLandmark2DPath = '../../data/talkingphoto/IMG_2294/IMG_2294_26.png.txt'
    targetLandmark2D = readPoints(targetLandmark2DPath)[:68]
    '''2. Create the source mesh'''
    gridSize = 50
    src_rows = np.linspace(0, rows, gridSize)  # grid rows
    src_cols = np.linspace(0, cols, gridSize)  # grid columns
    src_rows, src_cols = np.meshgrid(src_rows, src_cols)
    src = np.dstack([src_cols.flat, src_rows.flat])[0]
    '''!!! IMPORTANT DEEPCOPY!!!'''
    dst = copy.deepcopy(src)
    '''triTxt'''
    # triTxtPath = './meshTri_640_640(trump-12).txt'
    triList = generateMeshTriTxt(image, 50, 50, originLandmark2D)
    '''3. Draw an ellipse with skimage and collect all mesh points inside it'''
    rr, cc = ellipse_perimeter(originLandmark2D[ELLIPSE_CENTER][1],
                               originLandmark2D[ELLIPSE_CENTER][0],
                               200,
                               260,
                               orientation=30)
    rr, cc = cc, rr  # swap so rr holds columns and cc holds rows
    # print rr.shape, cc.shape
    ellipseVerts = np.dstack([rr, cc])[0]  # all points on the ellipse perimeter
    # print ellipseVerts
    mask = points_in_poly(src, ellipseVerts)

    pointsInEllipseList = []
    indexInEllipseList = []
    for i, (s, m) in enumerate(zip(src, mask)):
        if m:
            pointsInEllipseList.append(s)
            indexInEllipseList.append(i)
    # print len(indexInEllipseList)
    # x=pointsInEllipseList[i][1], y=pointsInEllipseList[i][0]
    pointsInEllipseArray = np.asarray(pointsInEllipseList)
    '''swap columns of pointsInEllipseList'''
    # x=pointsInEllipseList[i][0], y=pointsInEllipseList[i][1]
    # pointsInEllipseArray[:, [0, 1]] = pointsInEllipseArray[:, [1, 0]]
    # image[cc, rr] = 255  # draw ellipse perimeter on image
    drawPointsOnImg(pointsInEllipseArray, imageTmp, 'r')
    '''4. compute delta (68) of each new landmark X(Y) and old landmark X(Y)'''
    deltaAllLandmarksList = []
    for i in range(len(targetLandmark2D)):
        deltaAllLandmarksList.append([
            targetLandmark2D[i][0] - originLandmark2D[i][0],
            targetLandmark2D[i][1] - originLandmark2D[i][1]
        ])
    # deltaAllLandmarksArray = np.asarray(deltaAllLandmarksList)
    # print deltaAllLandmarksList
    '''5. compute delta of each point in ellipse'''
    radius = 5 * rows / float(gridSize)
    targetPointsModifiedInEllipseList = []
    isPointsInEllipseModifiedList = []
    deltaOfMeshPointsInEllipseList = []
    for meshPoint in pointsInEllipseArray:
        _, _, landmarksInSmallGridArray, deltaInSmallGridArray = getLandmarksInSmallGrid(
            meshPoint, originLandmark2D, deltaAllLandmarksList, radius=radius)
        '''Draw the small grid and the deltas of all landmarks inside it'''
        # imageTmp = copy.deepcopy(image)
        # cv2.circle(imageTmp, (int(meshPoint[0]),
        #                       int(meshPoint[1])), int(radius), (0, 255, 0), 2)
        # if landmarksInSmallGridArray.shape[0] != 0:
        #     for (deltaX, deltaY), (landmarkX, landmarkY) in zip(deltaInSmallGridArray, landmarksInSmallGridArray):
        #         cv2.line(imageTmp, (int(landmarkX), int(landmarkY)), (int(
        #             landmarkX+deltaX), int(landmarkY+deltaY)), (255, 0, 0), 2)
        #         cv2.circle(imageTmp, (int(landmarkX+deltaX),
        #                               int(landmarkY+deltaY)), 2, (0, 0, 255), -1)
        # cv2.imshow('imgTmp', imageTmp)
        # cv2.waitKey(50)
        deltaXOfMeshPoint = 0
        deltaYOfMeshPoint = 0
        if deltaInSmallGridArray.shape[0] != 0:
            for i, (deltaInSmallGrid, landmarkInSmallGrid) in enumerate(
                    zip(deltaInSmallGridArray, landmarksInSmallGridArray)):
                deltaInSmallGridX = deltaInSmallGrid[0]
                deltaInSmallGridY = deltaInSmallGrid[1]
                deltaXOfMeshPoint = deltaXOfMeshPoint + deltaInSmallGridX / \
                    twoPointsDistance(meshPoint, landmarkInSmallGrid)
                deltaYOfMeshPoint = deltaYOfMeshPoint + deltaInSmallGridY / \
                    twoPointsDistance(meshPoint, landmarkInSmallGrid)
        targetMeshPointX = meshPoint[0] + deltaXOfMeshPoint
        targetMeshPointY = meshPoint[1] + deltaYOfMeshPoint
        deltaOfMeshPointsInEllipseList.append(
            [deltaXOfMeshPoint, deltaYOfMeshPoint])
        if deltaXOfMeshPoint == 0 and deltaYOfMeshPoint == 0:
            isPointsInEllipseModifiedList.append(False)
        else:
            isPointsInEllipseModifiedList.append(True)
        '''Draw the start and end point of each meshPoint'''
        # cv2.line(imageTmp, (int(meshPoint[0]), int(meshPoint[1])),
        #          (int(targetMeshPointX), int(targetMeshPointY)), (0, 255, 0), 1)
        # cv2.circle(imageTmp, (int(meshPoint[0]), int(meshPoint[1])),
        #            2, (0, 255, 0), -1)
        # cv2.circle(imageTmp, (int(targetMeshPointX), int(targetMeshPointY)),
        #            2, (255, 0, 0), -1)
        # if deltaXOfMeshPoint != 0 or deltaYOfMeshPoint != 0:
        #     cv2.imshow('imageTmp', imageTmp)
        #     cv2.waitKey(0)
        targetPointsModifiedInEllipseList.append(
            [targetMeshPointX, targetMeshPointY])
    targetPointsModifiedInEllipseArray = np.asarray(
        targetPointsModifiedInEllipseList)
    # drawPointsOnImg(pointsInEllipseArray, imageTmp, 'b')
    # drawPointsOnImg(targetPointsModifiedInEllipseArray, imageTmp, 'r')

    assert pointsInEllipseArray.shape[0] == len(isPointsInEllipseModifiedList)
    assert pointsInEllipseArray.shape[0] == len(deltaOfMeshPointsInEllipseList)
    '''5.1 compute rest points in ellipse (hair)'''
    targetPointsNotModifiedInEllipseList = []
    for i, meshPoint in enumerate(pointsInEllipseArray):
        deltaXOfMeshPointNotModified = 0
        deltaYOfMeshPointNotModified = 0
        if not isPointsInEllipseModifiedList[i]:
            meshPointsOnSameColumnArray, meshPointsIndexOnSameColumnList = getMeshPointsOnSameColumn(
                meshPoint, pointsInEllipseArray)
            for meshPointOnSameColumn, meshPointIndexOnSameColumn in zip(
                    meshPointsOnSameColumnArray,
                    meshPointsIndexOnSameColumnList):
                deltaXOfMeshPointOnSameColumn = deltaOfMeshPointsInEllipseList[
                    meshPointIndexOnSameColumn][0]
                deltaYOfMeshPointOnSameColumn = deltaOfMeshPointsInEllipseList[
                    meshPointIndexOnSameColumn][1]
                deltaXOfMeshPointNotModified = deltaXOfMeshPointNotModified + \
                    deltaXOfMeshPointOnSameColumn / \
                    twoPointsDistance(meshPoint, meshPointOnSameColumn)
                deltaYOfMeshPointNotModified = deltaYOfMeshPointNotModified + \
                    deltaYOfMeshPointOnSameColumn / \
                    twoPointsDistance(meshPoint, meshPointOnSameColumn)
        targetMeshPointXNotModified = meshPoint[0] + \
            deltaXOfMeshPointNotModified
        targetMeshPointYNotModified = meshPoint[1] + \
            deltaYOfMeshPointNotModified
        targetPointsNotModifiedInEllipseList.append(
            [targetMeshPointXNotModified, targetMeshPointYNotModified])
    targetPointsNotModifiedInEllipseArray = np.asarray(
        targetPointsNotModifiedInEllipseList)

    assert targetPointsNotModifiedInEllipseArray.shape == targetPointsModifiedInEllipseArray.shape
    '''5.2 merge targetPointsModifiedInEllipseArray and targetPointsNotModifiedInEllipseArray'''
    targetPointsInEllipseList = []
    for targetPointModifiedInEllipse, targetPointNotModifiedInEllipse, isPointInEllipseModified in zip(
            targetPointsModifiedInEllipseArray,
            targetPointsNotModifiedInEllipseArray,
            isPointsInEllipseModifiedList):
        if isPointInEllipseModified:
            targetPointsInEllipseList.append(targetPointModifiedInEllipse)
        else:
            targetPointsInEllipseList.append(targetPointNotModifiedInEllipse)
    '''6. compute final target mesh'''
    # print targetPointsInEllipseList.shape
    drawPointsOnImg(targetPointsInEllipseList, imageTmp, 'b')
    # targetPointsInEllipseList[:, [0, 1]
    #                            ] = targetPointsInEllipseList[:, [1, 0]]
    dst[indexInEllipseList] = targetPointsInEllipseList
    '''draw src and dst mesh on image'''
    # drawPointsOnImg(src, imageTmp, 'g')
    # drawPointsOnImg(dst, imageTmp, 'b')
    cv2.imwrite('./tmp.png', imageTmp)
    '''7. PiecewiseAffineTransform without landmarks'''
    tform = PiecewiseAffineTransform()
    tform.estimate(dst, src)
    out = warp(image, tform)
    '''8. imshow and imwrite without landmarks for skimage'''
    imshow('out_pat_wo', out)
    cv2.imwrite('./out_pat_wo.png', out * 255)
    '''7.1 mesh warp with landmarks written by myself'''
    '''SEE EXP03'''
    # '''draw dst triangle'''
    # imgTmp = copy.copy(image)
    # for tri in triList:
    #     a, b, c = tri.split()
    #     a = int(a)
    #     b = int(b)
    #     c = int(c)
    #     z1 = (int(dst[a][0]), int(dst[a][1]))
    #     z2 = (int(dst[b][0]), int(dst[b][1]))
    #     z3 = (int(dst[c][0]), int(dst[c][1]))
    #     cv2.line(imgTmp, z1, z2, (255, 255, 255), 1)
    #     cv2.line(imgTmp, z2, z3, (255, 255, 255), 1)
    #     cv2.line(imgTmp, z3, z1, (255, 255, 255), 1)
    #     cv2.imshow('tmp', imgTmp)
    #     cv2.waitKey(1)
    # imgMorph = morph_modify_for_meshwarp(src, dst, image, triList)
    '''8.1 imshow and imwrite with landmarks by myself'''
    '''SEE EXP03'''
    # imshow('imgMorph', imgMorph)
    # cv2.imwrite('./out_wo.png', imgMorph)
    '''7.2 PiecewiseAffineTransform with landmarks'''
    originLandmark2DArray = np.asarray(originLandmark2D)
    src = np.concatenate((src, originLandmark2DArray), axis=0)
    targetLandmark2DArray = np.asarray(targetLandmark2D)
    dst = np.concatenate((dst, targetLandmark2DArray), axis=0)
    tform = PiecewiseAffineTransform()
    tform.estimate(dst, src)
    out = warp(image, tform)
    '''8.2 imshow and imwrite with landmarks for skimage'''
    imshow('out_pat_w', out)
    cv2.imwrite('./out_pat_w.png', out * 255)
    # cv2.imwrite('./ori.jpg', image)
    # # fig, ax = plt.subplots()
    # # ax.imshow(out)
    # # ax.scatter(pointsInEllipseArray[:, 0], pointsInEllipseArray[:, 1],
    # #            marker='+', color='b', s=5)
    # # ax.plot(tform.inverse(src)[:, 0], tform.inverse(src)[:, 1], '.r')
    # # plt.show()
    '''7.3 mesh warp with landmarks written by myself'''
    originLandmark2DArray = np.asarray(originLandmark2D)
    src = np.concatenate((src, originLandmark2DArray), axis=0)
    targetLandmark2DArray = np.asarray(targetLandmark2D)
    dst = np.concatenate((dst, targetLandmark2DArray), axis=0)
    '''draw dst triangle'''
    # imgTmp = copy.copy(image)
    # for tri in triList:
    #     a, b, c = tri.split()
    #     a = int(a)
    #     b = int(b)
    #     c = int(c)
    #     z1 = (int(dst[a][0]), int(dst[a][1]))
    #     z2 = (int(dst[b][0]), int(dst[b][1]))
    #     z3 = (int(dst[c][0]), int(dst[c][1]))
    #     cv2.line(imgTmp, z1, z2, (255, 255, 255), 1)
    #     cv2.line(imgTmp, z2, z3, (255, 255, 255), 1)
    #     cv2.line(imgTmp, z3, z1, (255, 255, 255), 1)
    #     cv2.imshow('tmp', imgTmp)
    #     cv2.waitKey(1)
    imgMorph = morph_modify_for_meshwarp(src, dst, image, triList)
    '''8.3 imshow and imwrite with landmarks by myself'''
    imshow('imgMorph', imgMorph)
    cv2.imwrite('./out_w.png', imgMorph)
Code Example #52
def apply_translation(img):
    tform = transform.SimilarityTransform(
        translation=(np.random.uniform(-5, 5), np.random.uniform(-5, 5)))
    translated = 255 * warp(img, tform, mode='edge')
    return translated.astype(np.uint8)
Code Example #53
    def _perform(self):
        do_plot = (self.config.instrument.plot_level >= 3)
        self.logger.info("Extracting arc spectra")
        # Double check
        if not self.action.args.original_filename:
            self.logger.error("No traces found")
            return self.action.args
        # All is ready
        original_filename = self.action.args.original_filename
        self.logger.info("Trace table found: %s" % original_filename)
        # trace = read_table(tab=tab, indir='redux', suffix='trace')
        # Find  and read control points from continuum bars
        if hasattr(self.context, 'trace'):
            trace = self.context.trace
        else:
            trace = read_table(
                input_dir=os.path.join(self.config.instrument.cwd,
                                       self.config.instrument.output_directory),
                file_name=original_filename)
            self.context.trace = {}
            for key in trace.meta.keys():
                self.context.trace[key] = trace.meta[key]
        middle_row = self.context.trace['MIDROW']
        window = self.context.trace['WINDOW']
        self.action.args.reference_bar_separation = self.context.trace[
            'REFDELX']
        self.action.args.contbar_image_number = self.context.trace['CBARSNO']
        self.action.args.contbar_image = self.context.trace['CBARSFL']
        self.action.args.arc_number = self.action.args.ccddata.header['FRAMENO']
        self.action.args.arc_image = self.action.args.ccddata.header['OFNAME']

        self.action.args.source_control_points = trace['src']
        self.action.args.destination_control_points = trace['dst']
        self.action.args.bar_id = trace['barid']
        self.action.args.slice_id = trace['slid']

        self.logger.info("Fitting spatial control points")
        transformation = tf.estimate_transform(
            'polynomial', self.action.args.source_control_points,
            self.action.args.destination_control_points, order=3)

        self.logger.info("Transforming arc image")
        warped_image = tf.warp(self.action.args.ccddata.data, transformation)
        # Write warped arcs if requested
        if self.config.instrument.saveintims:
            from kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_writer
            # write out warped image
            self.action.args.ccddata.data = warped_image
            kcwi_fits_writer(self.action.args.ccddata,
                             table=self.action.args.table,
                             output_file=self.action.args.name,
                             output_dir=self.config.instrument.output_directory,
                             suffix="warped")
            self.logger.info("Transformed arcs produced")
        # extract arcs
        self.logger.info("Extracting arcs")
        arcs = []
        # sectors for background subtraction
        sectors = 16
        for xyi, xy in enumerate(self.action.args.source_control_points):
            if xy[1] == middle_row:
                xi = int(xy[0]+0.5)
                arc = np.median(
                    warped_image[:, (xi - window):(xi + window + 1)], axis=1)
                # divide spectrum into sectors
                div = int((len(arc)-100) / sectors)
                # get minimum for each sector
                xv = []
                yv = []
                for i in range(sectors):
                    mi = np.nanargmin(arc[50+i*div:50+(i+1)*div])
                    mn = np.nanmin(arc[50+i*div:50+(i+1)*div])
                    xv.append(mi+50+i*div)
                    yv.append(mn)
                # fit minima to model background
                res = np.polyfit(xv, yv, 3)
                xp = np.arange(len(arc))
                bkg = np.polyval(res, xp)   # resulting model
                # plot if requested
                if do_plot:
                    p = figure(title=self.action.args.plotlabel + "ARC # %d" %
                               len(arcs),
                               x_axis_label="Y CCD Pixel",
                               y_axis_label="Flux",
                               plot_width=self.config.instrument.plot_width,
                               plot_height=self.config.instrument.plot_height)
                    p.line(xp, arc, legend_label='Arc', color='blue')
                    p.line(xp, bkg, legend_label='Bkg', color='red')
                    bokeh_plot(p, self.context.bokeh_session)
                    q = input("Next? <cr>, q to quit: ")
                    if 'Q' in q.upper():
                        do_plot = False
                # subtract model background
                arc -= bkg
                # add to arcs list
                arcs.append(arc)
        # Did we get the correct number of arcs?
        if len(arcs) == self.config.instrument.NBARS:
            self.logger.info("Extracted %d arcs" % len(arcs))
            self.context.arcs = arcs
        else:
            self.logger.error("Did not extract %d arcs, extracted %d" %
                              (self.config.instrument.NBARS, len(arcs)))

        log_string = ExtractArcs.__module__
        self.action.args.ccddata.header['HISTORY'] = log_string
        self.logger.info(log_string)

        return self.action.args
Code Example #54
def inverse_transform(img, src, dst, original_image_size):
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    warped = tf.warp(img, tform3.inverse, output_shape=original_image_size)
    return np.round(warped * 255).astype(np.uint8)
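A quick sketch of the helper above under assumed corner points, with `tf` bound to skimage.transform:

import numpy as np

src = np.array([[0, 0], [0, 255], [255, 255], [255, 0]], dtype=np.float32)
dst = np.array([[20, 10], [10, 240], [250, 250], [240, 5]], dtype=np.float32)
img = np.random.randint(0, 256, (256, 256), dtype=np.uint8)
restored = inverse_transform(img, src, dst, (256, 256))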
Code Example #55
# Rotation invariance of ORB features

from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt


img1 = rgb2gray(data.astronaut())

img2 = tf.rotate(img1, 135)

tform = tf.AffineTransform(scale=(3.5, 1.1), rotation=0.5, translation=(0, -200))
img3 = tf.warp(img1, tform)

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors
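The imports above suggest this snippet was truncated before the matching step; a plausible continuation would be:

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

fig, ax = plt.subplots(nrows=2, ncols=1)
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
plt.show()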
Code Example #56
def forward_transform(img, src, dst, target_size):
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    warped = tf.warp(img, tform3, output_shape=(target_size, target_size))
    return np.round(warped * 255).astype(np.uint8)
def main():
    # Let's import first frame of videofile, and show it
    print('Choose videofile for making calib curves')
    file = get_filenames()
    file = file[0]  # get just single file instead of list
    print('Importing file ', file)
    frame = skvideo.io.vread(file, num_frames=1)  # import just first frame
    frame = rgb2gray(frame[0])  # get element instead of list, make grayscale
    # plt.figure()
    # plt.imshow(frame, cmap=plt.cm.gray)
    # plt.show()

    # Compensate angle if its needed

    finish = False
    angle = 0
    while finish == False:
        angle, finish = rotate_image(frame, angle)
    if angle != 0:
        frame = rotate(frame, angle)

    # Detect center of lightspot, show quadrants:
    centroid = threshold_centroid(frame)
    print('Showing first frame of video with quadrants...')
    plot_im_w_quadrants(frame, centroid, fig_title='1st frame with quadrants')

    # Demonstrate how the shifted image looks
    print('Image shifted for 5px in each axis will look like this:')
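    # warp treats the given transform as the inverse map, so translation=(5, 5)
    # moves the image content by (-5, -5); only the magnitude of the shift
    # matters for the calibration curve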
    transform = AffineTransform(translation=(5, 5))
    shifted = warp(frame, transform, mode='constant', preserve_range=True)
    plot_im_w_quadrants(shifted, centroid, fig_title='5 px shift')
    print(
        'If you want to have max test shift as shown above, just press enter')
    print(
        '(If lightspot is partially out of field of view, better to choose smaller shift)'
    )
    print(
        'Otherwise manually enter desired shift of image in px, and press enter'
    )
    print('Note: the same shift will be used for each axis')
    max_shift = input()
    if max_shift == '':
        max_shift = 5
    else:
        max_shift = float(max_shift)
    print('Images will be shifted from 0 to %s px' % max_shift)
    k_px_um = input('Enter px to um coefficient (scale):\n')
    k_px_um = float(k_px_um)
    # Shift images along x axis

    shifted_im = []
    # specify parameters for calculations
    # generate dx value for linear shift
    # x_shift = np.array([0.1*dx for dx in range(0, max_shift+1)])
    dx = 0.1
    x_shift = np.arange(0, max_shift * (1 + dx), dx * max_shift)
    # k_px_um = 1.36 # scale px to um
    normalization = False  # don't scale signal over SUM
    shift_vs_sig = True  # calculate shift from signal, not other way
    for dx in x_shift:
        transform = AffineTransform(translation=(dx,
                                                 dx))  # shift along both axis
        shifted_im.append(
            warp(frame, transform, mode='constant', preserve_range=True))

    # Calculate the intensities

    Il = np.array([])
    Iz = np.array([])
    Isum = np.array([])
    for i in range(len(shifted_im)):
        Iz, Il, Isum = calc_intensities(shifted_im[i], centroid, Iz, Il, Isum)

    # Show calculated intensity difference vs displacement and get linear fit coefficients of the calibration:
    # without normalization
    plot_shift_curves(k_px_um=k_px_um,
                      Il=Il,
                      Iz=Iz,
                      Isum=Isum,
                      x_shift=x_shift,
                      normalization=False,
                      shift_vs_sig=shift_vs_sig)
    k_x, b_x, k_y, b_y = calc_calib_line(x_shift=x_shift,
                                         y_shift=x_shift,
                                         k_px_um=k_px_um,
                                         Il=Il,
                                         Iz=Iz,
                                         normalization=False,
                                         shift_vs_sig=shift_vs_sig)
    # with normalization
    plot_shift_curves(k_px_um=k_px_um,
                      Il=Il,
                      Iz=Iz,
                      Isum=Isum,
                      x_shift=x_shift,
                      normalization=True,
                      shift_vs_sig=shift_vs_sig)
    k_x_norm, b_x_norm, k_y_norm, b_y_norm = calc_calib_line(
        x_shift=x_shift,
        y_shift=x_shift,
        k_px_um=k_px_um,
        Il=Il,
        Iz=Iz,
        Isum=Isum,
        normalization=True,
        shift_vs_sig=shift_vs_sig)
    return k_x, b_x, k_y, b_y, k_x_norm, b_x_norm, k_y_norm, b_y_norm
Code Example #58
def rotate_images(data_folder, rots_per_pic):
    """Rotates images and produces the new coordinates for their facial keypoint markers.
	Creates rotated files and new markers in the same folder where the input files came from.
	Input path should exist.
	Number of rotations should be a natural number.
	Parameters
	----------
	data_folder : String
		Folder containing raw data to be processed.
	rots_per_pic : Integer
		Number of rotations to be produced per image.
	"""

    print "Rotating images..."

    #search for images in folder iteratively
    old_paths = []
    for folder, subs, files in os.walk(data_folder):
        for filename in files:
            if filename.endswith('.png') or filename.endswith('.jpg'):
                old_paths.append(os.path.join(folder, filename))
    #sorts the paths obtained
    old_paths.sort()

    old_paths_with_sums = {}

    for filename in old_paths:
        old_paths_with_sums[filename] = 0

    #counts how many times the images were already processed
    new_paths = []
    all_files_sum = 0
    already_processed_sum = 0
    for filename in old_paths:
        if "processed" not in filename:
            all_files_sum = all_files_sum + 1
            new_paths.append(filename)
            print('File found:')
            print filename
        else:
            already_processed_sum = already_processed_sum + 1
            matching = [
                s for s in new_paths
                if ((filename.partition("_processed_")[0] + ".png") == s or (
                    filename.partition("_processed_")[0] + ".jpg") == s)
            ]
            for i in matching:
                old_paths_with_sums[i] = old_paths_with_sums[i] + 1
                if old_paths_with_sums[i] >= rots_per_pic:
                    new_paths.remove(i)
                    print('File already processed ' +
                          str(old_paths_with_sums[i]) + ' time(s):')
                    print(i)
                else:
                    print('File processed ' + str(old_paths_with_sums[i]) +
                          ' time(s):')
                    print(i)

    processed_sum = 0
    too_big_angles_sum = 0
    no_desc_found_sum = 0
    markers_out_of_mesh = 0

    for current_path in new_paths:
        #rotates image as many times as needed to achieve the desired number of rotations
        for i in range(int(rots_per_pic) - old_paths_with_sums[current_path]):
            path = current_path

            #loads files generated by Zface if they exist and are not empty
            if (os.path.isfile(path + '.mesh3D')
                    and os.path.isfile(path + '.mesh2D')
                    and os.path.isfile(path + '.ctrl2D')
                    and os.path.isfile(path + '.pars')
                    and os.stat(path + '.mesh3D').st_size != 0
                    and os.stat(path + '.mesh2D').st_size != 0
                    and os.stat(path + '.ctrl2D').st_size != 0
                    and os.stat(path + '.pars').st_size != 0):
                src3 = np.loadtxt(path + '.mesh3D')
                src2 = np.loadtxt(path + '.mesh2D')
                ctrl2 = np.loadtxt(path + '.ctrl2D')
                scale = np.loadtxt(path + '.pars')[0]
                translx = np.loadtxt(path + '.pars')[1]
                transly = np.loadtxt(path + '.pars')[2]
                pitch = np.loadtxt(path + '.pars')[3]
                yaw = np.loadtxt(path + '.pars')[4]
                roll = np.loadtxt(path + '.pars')[5]

                # tests whether the initial rotation is too large
                if (abs(yaw) < radians(30) and abs(pitch) < radians(15)):

                    image = data.load(path)
                    rows, cols = image.shape[0], image.shape[1]

                    x = src3[:, 0]
                    y = src3[:, 1]
                    z = src3[:, 2]

                    #transform 3D mesh from normalized space and rotation to actual space and rotation
                    # rotate with simultaneous assignment so each axis reads
                    # the pre-rotation coordinates, not already-updated ones
                    x, y = (x * cos(roll) - y * sin(roll),
                            x * sin(roll) + y * cos(roll))

                    x, z = (x * cos(yaw) + z * sin(yaw),
                            -x * sin(yaw) + z * cos(yaw))

                    y, z = (y * cos(pitch) - z * sin(pitch),
                            y * sin(pitch) + z * cos(pitch))

                    x = x * scale + translx
                    y = y * scale + transly

                    # orthographically projects the 3D mesh to 2D (this will be our source for the Piecewise Affine Transform)
                    src_cols = x
                    src_rows = y

                    src_rows, src_cols = np.meshgrid(src_rows,
                                                     src_cols,
                                                     sparse=True)
                    src = np.dstack([src_cols.flat, src_rows.flat])[0]

                    #transforms it back to normalized space
                    x = (x - translx) / scale
                    y = (y - transly) / scale

                    #rotates it back to 0 rotation
                    yaw = -yaw
                    pitch = -pitch
                    roll = -roll

                    #adds random rotation
                    real_yaw = radians(random.uniform(-30, 30))
                    real_pitch = radians(random.uniform(-15, 15))
                    real_roll = 0

                    yaw = yaw + real_yaw
                    pitch = pitch + real_pitch
                    roll = roll + real_roll

                    # again, simultaneous assignment keeps each axis rotation
                    # reading the pre-rotation coordinates
                    x, y = (x * cos(roll) - y * sin(roll),
                            x * sin(roll) + y * cos(roll))

                    x, z = (x * cos(yaw) + z * sin(yaw),
                            -x * sin(yaw) + z * cos(yaw))

                    y, z = (y * cos(pitch) - z * sin(pitch),
                            y * sin(pitch) + z * cos(pitch))

                    #transforms it back to real space
                    x = x * scale + translx
                    y = y * scale + transly

                    #orthographic projection of new coordinates will be the destination for PiecewiseAffineTransform
                    dst_cols = x
                    dst_rows = y
                    dst = np.vstack([dst_cols, dst_rows]).T

                    out_rows = rows
                    out_cols = cols

                    #looks for triangles formed by Delaunay triangularion, extracts the ones associated with each facial keypoint marker
                    tform = PiecewiseAffineTransform()
                    src_triangles, dst_triangles = tform.estimate(
                        src[:, 0:2], dst)
                    ctrl2_transforms = []
                    for current_ctrl2 in ctrl2:
                        for i in range(len(src_triangles)):
                            triangle = polygon.Path(src_triangles[i])
                            if triangle.contains_point(current_ctrl2):
                                ctrl2_transforms.append(tform.affines[i])
                                break
                    if len(ctrl2_transforms) != 49:
                        markers_out_of_mesh = markers_out_of_mesh + 1
                        print "didn't process image, because can't find all shape parameters:"
                        print path
                        continue
                    out_ctrl2 = []
                    for i in range(len(ctrl2_transforms)):
                        #performs transformation on marker
                        out_ctrl2.append(ctrl2_transforms[i](ctrl2[i]))
                    out_ctrl2 = np.transpose((np.transpose(out_ctrl2)[0],
                                              np.transpose(out_ctrl2)[1]))
                    out_ctrl2 = np.squeeze(out_ctrl2)

                    #transforms image to the new surface triangle by triangle using Delaunay triangulation, then interpolation to smooth it out
                    tform = PiecewiseAffineTransform()
                    tform.estimate(dst, src[:, 0:2])
                    out_image = warp(image,
                                     tform,
                                     output_shape=(out_rows, out_cols))

                    out_path = path[:-4] + '_processed' + '_yaw_' + str(
                        real_yaw) + '_pitch_' + str(
                            real_pitch) + '_roll_' + str(real_roll) + path[-4:]

                    #saves image and marker points
                    imsave(out_path, out_image)

                    np.savetxt(out_path + '_0.txt', out_ctrl2)

                    processed_sum = processed_sum + 1
                    print(str(processed_sum) + '. file processed:')
                    print(path)
                else:
                    too_big_angles_sum = too_big_angles_sum + 1
                    print(
                        "didn't process image, because the original rotation is too large:"
                    )
                    print(path)
            else:
                no_desc_found_sum = no_desc_found_sum + 1
                print(
                    "didn't process image, because descriptor files were not found:"
                )
                print(path)

    out_paths = []
    for folder, subs, files in os.walk(data_folder):
        for filename in files:
            if filename.endswith('.png') or filename.endswith('.jpg'):
                if "processed" in filename:
                    out_path = os.path.join(folder,
                                            filename).replace(data_folder, "")
                    out_paths.append(out_path)

    #writes paths of generated images into contents
    filename = data_folder + '/contents'

    with open(filename, 'w') as f:
        f.write('\n'.join(out_paths))

    print "Shuffling contents..."
    #shuffles contents
    shuffle_contents(filename)

    #prints some statistics about the process on the screen
    print
    print("Statistics:")
    print("-----------")
    print("Files found: " + str(all_files_sum))
    if all_files_sum != 0:
        print("Already processed: " + str(already_processed_sum))
        print("Got processed now: " + str(processed_sum))
        print(
            "All processed: " + str(
                (processed_sum + already_processed_sum) * 100 / all_files_sum)
            + "%")
        print("Can't be processed because of too big angles: " +
              str(too_big_angles_sum * 100 / all_files_sum) + "%")
        print("Can't be processed because of no decriptors: " +
              str(no_desc_found_sum * 100 / all_files_sum) + "%")
        print("Can't be processed because of markers outside of mesh: " +
              str(markers_out_of_mesh * 100 / all_files_sum) + "%")
Code Example #59
def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
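    # `tf` is a skimage transform object here (not the module); warp accepts
    # its 3x3 parameter matrix directly as the inverse coordinate map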
    m = tf.params
    return warp(img, m, output_shape=output_shape, mode=mode, order=order)
Code Example #60
def make_cube_helper(argument):
    """Warp each slice"""
    logger = argument['logger']
    logger.info("Transforming image slice %d" % (argument['slice_number'] + 1))
    # input params
    slice_number = argument['slice_number']
    xsize = argument['xsize']
    ysize = argument['ysize']
    tform = argument['geom']['tform'][slice_number]
    xl0 = argument['geom']['xl0'][slice_number]
    xl1 = argument['geom']['xl1'][slice_number]
    # slice data
    slice_img = argument['img'][:, xl0:xl1]
    slice_var = argument['std'][:, xl0:xl1]
    slice_msk = argument['msk'][:, xl0:xl1]
    slice_flg = argument['flg'][:, xl0:xl1]
    if 'obj' in argument:
        slice_obj = argument['obj'][:, xl0:xl1]
    else:
        slice_obj = None
    if 'sky' in argument:
        slice_sky = argument['sky'][:, xl0:xl1]
    else:
        slice_sky = None
    if 'del' in argument:
        slice_del = argument['del'][:, xl0:xl1]
    else:
        slice_del = None
    # do the warping
    warped = tf.warp(slice_img, tform, order=3, output_shape=(ysize, xsize))
    varped = tf.warp(slice_var, tform, order=3, output_shape=(ysize, xsize))
    marped = tf.warp(slice_msk, tform, order=3, output_shape=(ysize, xsize))
    farped = tf.warp(slice_flg,
                     tform,
                     order=3,
                     output_shape=(ysize, xsize),
                     preserve_range=True)

    if slice_obj is not None:
        oarped = tf.warp(slice_obj,
                         tform,
                         order=3,
                         output_shape=(ysize, xsize))
    else:
        oarped = None

    if slice_sky is not None:
        sarped = tf.warp(slice_sky,
                         tform,
                         order=3,
                         output_shape=(ysize, xsize))
    else:
        sarped = None

    if slice_del is not None:
        darped = tf.warp(slice_del,
                         tform,
                         order=3,
                         output_shape=(ysize, xsize),
                         preserve_range=True)
    else:
        darped = None

    return argument['slice_number'], warped, varped, marped, farped, \
        oarped, sarped, darped