Example #1
def test_optical_flow_dtype():
    # Generate synthetic data
    rnd = np.random.RandomState(0)
    image0 = rnd.normal(size=(256, 256))
    gt_flow, image1 = _sin_flow_gen(image0)
    # Estimate the flow at double precision
    flow_f64 = optical_flow_tvl1(image0,
                                 image1,
                                 attachment=5,
                                 dtype=np.float64)

    assert flow_f64.dtype == np.float64

    # Estimate the flow at single precision
    flow_f32 = optical_flow_tvl1(image0,
                                 image1,
                                 attachment=5,
                                 dtype=np.float32)

    assert flow_f32.dtype == np.float32

    # Assert that floating point precision does not affect the quality
    # of the estimated flow

    assert np.abs(flow_f64 - flow_f32).mean() < 1e-3
Example #2
def test_2d_motion(dtype):
    # Generate synthetic data
    rnd = np.random.RandomState(0)
    image0 = rnd.normal(size=(256, 256))
    gt_flow, image1 = _sin_flow_gen(image0)
    image1 = image1.astype(dtype, copy=False)
    float_dtype = _supported_float_type(dtype)
    # Estimate the flow
    flow = optical_flow_tvl1(image0, image1, attachment=5, dtype=float_dtype)
    assert flow.dtype == float_dtype
    # Assert that the average absolute error is less than half a pixel
    assert abs(flow - gt_flow).mean() < 0.5

    if dtype != float_dtype:
        with pytest.raises(ValueError):
            optical_flow_tvl1(image0, image1, attachment=5, dtype=dtype)
Example #3
def test_no_motion_3d():
    rnd = np.random.default_rng(0)
    img = rnd.normal(size=(64, 64, 64))

    flow = optical_flow_tvl1(img, img)

    assert np.all(flow == 0)
Example #4
def test_no_motion_3d():
    rnd = np.random.RandomState(0)
    img = rnd.normal(size=(128, 128, 128))

    flow = optical_flow_tvl1(img, img)

    assert np.all(flow == 0)
Example #5
def _estimate_single(self, predicted, measured):
    assert predicted.shape == self.shape
    assert measured.shape == self.shape
    flow = optical_flow_tvl1(predicted, measured)
    # swap the (row, col) flow components into (x, y) order
    flow[[1, 0], ] = flow[[0, 1], ]
    xy_flow = self.xy_lin - flow
    _Afunc_coord_warp = lambda transform_vec: self._coordinate_warp(
        transform_vec, self.xy_lin, xy_flow)

    # estimate the transform parameters (rotation angle and translation)
    # from the optical flow
    results = sop.fmin_l_bfgs_b(_Afunc_coord_warp, np.array([0.0, 0.0, 0.0]))
    transform_final = results[0]
    if results[2]["warnflag"]:
        transform_final *= 0.0
        print("Transform estimation did not converge")

    # inverse-warp the measured image with the estimated rigid transform
    cos_t = np.cos(transform_final[0])
    sin_t = np.sin(transform_final[0])
    aff_mat = np.array([[cos_t, -sin_t, transform_final[1]],
                        [sin_t, cos_t, transform_final[2]],
                        [0, 0, 1]])
    tform = transform.AffineTransform(matrix=aff_mat)
    measured_warp = transform.warp(measured, tform.inverse, cval=1.0)

    return measured_warp, transform_final
Example #6
def test_3d_motion():
    # Generate synthetic data
    rnd = np.random.RandomState(0)
    image0 = rnd.normal(size=(128, 128, 128))
    gt_flow, image1 = _sin_flow_gen(image0)
    # Estimate the flow
    flow = optical_flow_tvl1(image0, image1, attachment=5)
    # Assert that the average absolute error is less than half a pixel
    assert abs(flow - gt_flow).mean() < 0.5
Example #7
def main():
    images = []
    for rgb_f in read_one_frame():
        images.append(rgb_f)
    numLevels = 5
    window = 25
    # Plot
    # u,v=LucasKanadeMultiScale(grayscale(images[0]),grayscale(images[1]),window,numLevels)
    v, u = optical_flow_tvl1(grayscale(images[0]), grayscale(images[-1]))
    print(np.max(v))
    plot_optical_flow(images[0], images[-1], u * 10, v * 10,
                      'levels = ' + str(numLevels) + ', window = ' + str(window))
def optical_flow_correction(images):
    # be careful
    # works with picture artifacts
    # --- Use the estimated optical flow for registration
    example = sk.color.rgb2gray(images[0])
    nr, nc = example.shape
    row_coords, col_coords = np.meshgrid(np.arange(nr),
                                         np.arange(nc),
                                         indexing='ij')
    warped_images = []
    for image in images:
        image = sk.color.rgb2gray(image)
        v, u = optical_flow_tvl1(example, image)

        image_warp = tf.warp(image,
                             np.array([row_coords + v, col_coords + u]),
                             mode='nearest')
        '''
        colors = []
        for channel in range(3) :
            color = image[..., channel]

            # --- Compute the optical flow
            v, u = optical_flow_tvl1(example,color)   
       
            color_warp = tf.warp(color, np.array([row_coords + v,
                                                  col_coords + u]),
                                 mode='nearest')

            colors.append(color_warp) 
        # build an RGB image with the registered sequence
        reg_im = np.zeros((nr, nc, 3))
        reg_im[..., 0] = colors[0]
        reg_im[..., 1] = colors[1]
        reg_im[..., 2] = colors[2]
            
        warped_images.append(reg_im)
        '''
        warped_images.append(image_warp)
        example = image

    return np.asarray(warped_images)
Example #9
def test_wrong_dtype():
    rnd = np.random.RandomState(0)
    img = rnd.normal(size=(256, 256))
    with testing.raises(ValueError):
        u, v = optical_flow_tvl1(img, img, dtype=np.int64)
Example #10
def test_incompatible_shapes():
    rnd = np.random.RandomState(0)
    I0 = rnd.normal(size=(256, 256))
    I1 = rnd.normal(size=(128, 256))
    with testing.raises(ValueError):
        u, v = optical_flow_tvl1(I0, I1)
 def time_tvl1(self, dtype):
     registration.optical_flow_tvl1(self.I0, self.I1, dtype=dtype)
Example #12
import imageio
import numpy as np
from matplotlib import pyplot as plt
from skimage.color import rgb2gray
from skimage.data import stereo_motorcycle
from skimage.transform import warp
from skimage.registration import optical_flow_tvl1, optical_flow_ilk

# --- Load the sequence
image0, image1, disp = stereo_motorcycle()

# --- Convert the images to gray level: color is not supported.
image0 = rgb2gray(image0)
image1 = rgb2gray(image1)

# --- Compute the optical flow
v, u = optical_flow_tvl1(image0, image1)

# --- Use the estimated optical flow for registration

nr, nc = image0.shape

row_coords, col_coords = np.meshgrid(np.arange(nr),
                                     np.arange(nc),
                                     indexing='ij')

image1_warp = warp(image1,
                   np.array([row_coords + v, col_coords + u]),
                   mode='nearest')

# build an RGB image with the unregistered sequence
seq_im = np.zeros((nr, nc, 3))
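
The snippet above breaks off right after allocating seq_im. A minimal sketch of how the composite is typically filled in and displayed, following the same channel convention as Example #20 further down (the registered counterpart reg_im and the plotting at the end are assumptions, not part of the original fragment):

# unregistered composite: moving frame in the red channel, reference frame in
# green and blue, so any misalignment shows up as colored fringes
seq_im[..., 0] = image1
seq_im[..., 1] = image0
seq_im[..., 2] = image0

# same composite built from the warped (registered) moving frame
reg_im = np.zeros((nr, nc, 3))
reg_im[..., 0] = image1_warp
reg_im[..., 1] = image0
reg_im[..., 2] = image0

fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 5))
ax0.imshow(seq_im)
ax0.set_title("Unregistered sequence")
ax1.imshow(reg_im)
ax1.set_title("Registered sequence")
fig.tight_layout()
plt.show()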
Example #13
def estimate_framesPair_OF(image_0, image_1):
    # mean magnitude of the dense optical flow between the two frames
    v, u = optical_flow_tvl1(image_0, image_1)
    return np.mean(np.hypot(u, v))
Example #14
# reference_image, moving_image, disp = stereo_motorcycle()
# reference_image = rgb2gray(reference_image)
# moving_image = rgb2gray(moving_image)

reference_image = io.imread("cube1.png", as_gray=True)
moving_image = io.imread("cube2.png", as_gray=True)
dpi = 24
px, py = reference_image.shape
fig = pyplot.figure(figsize=(py / float(dpi), px / float(dpi)))
ax = fig.add_axes([0, 0, 1, 1])
ax.imshow(reference_image, cmap="gray")
fig.savefig("reference_image.png", transparent=True)
ax.imshow(moving_image, cmap="gray")
fig.savefig("moving_image.png", transparent=True)

flow = optical_flow_tvl1(moving_image, reference_image)
downscale = 20
flow = skimage.measure.block_reduce(flow, (1, downscale, downscale),
                                    numpy.mean)

h, w = flow[0].shape
X = np.arange(w)
Y = np.arange(h)
U, V = flow[1], flow[0]

M = np.hypot(U, V)
q = ax.quiver(X,
              Y,
              U,
              V,
              transform=Affine2D().scale(downscale, downscale) + ax.transData,
Example #15
 def time_tvl1(self):
     registration.optical_flow_tvl1(self.I0, self.I1)
Example #16
def test_wrong_dtype():
    rnd = np.random.default_rng(0)
    img = rnd.normal(size=(256, 256))
    with pytest.raises(ValueError):
        u, v = optical_flow_tvl1(img, img, dtype=np.int64)
Example #17
def test_incompatible_shapes():
    rnd = np.random.default_rng(0)
    I0 = rnd.normal(size=(256, 256))
    I1 = rnd.normal(size=(128, 256))
    with pytest.raises(ValueError):
        u, v = optical_flow_tvl1(I0, I1)
Example #18
 def register(self, img_fixed, img_moving):
     flow_y, flow_x = optical_flow_tvl1(img_fixed, img_moving)
     return TransformFlow(flow_x, flow_y)
Example #19
plt.show()

############################################################
# Method 4: optical-flow-based shift. Best for warped images.
# Takes two images and returns a vector field: for every pixel in
# image 1 you get a vector showing where it moved to in image 2.

from skimage import io

image = io.imread("images/Osteosarcoma_01.tif", as_gray=True)
offset_image = io.imread("images/Osteosarcoma_01_transl.tif", as_gray=True)
# offset image translated by (-17, 18) in y and x 


from skimage import registration
flow = registration.optical_flow_tvl1(image, offset_image)

# display dense optical flow
flow_x = flow[1, :, :]  #Along width
flow_y = flow[0, :, :]  #Along height


# Example 1: simple application, just taking the mean of the flow in x and y.
# Find the mean over all pixels in x and y and shift the image by that amount
# (see the sketch after this snippet).
# Ideally, each pixel would be moved by its own flow vector.
import numpy as np
xoff = np.mean(flow_x)
yoff = np.mean(flow_y)


print("Offset image was translated by: 18, -17")
Example #20
def video_interpolation_OF(image0_rgb, image1_rgb):

    import numpy as np
    from matplotlib import pyplot as plt
    from skimage.color import rgb2gray
    from skimage.data import stereo_motorcycle
    from skimage.transform import warp
    from skimage.registration import optical_flow_tvl1
    from skimage import img_as_float

    # Convert the images to gray level: color is not supported.
    image0 = rgb2gray(image0_rgb)
    image1 = rgb2gray(image1_rgb)

    # --- Compute the optical flow
    v, u = optical_flow_tvl1(image0, image1)

    # --- Use the estimated optical flow for registration

    nr, nc = image0.shape

    row_coords, col_coords = np.meshgrid(np.arange(nr),
                                         np.arange(nc),
                                         indexing='ij')

    image1_warp_grey = warp(image1,
                            np.array([row_coords + v, col_coords + u]),
                            mode='nearest')
    image1_warp_r = warp(img_as_float(image1_rgb[..., 0]),
                         np.array([row_coords + v, col_coords + u]),
                         mode='nearest')
    image1_warp_g = warp(img_as_float(image1_rgb[..., 1]),
                         np.array([row_coords + v, col_coords + u]),
                         mode='nearest')
    image1_warp_b = warp(img_as_float(image1_rgb[..., 2]),
                         np.array([row_coords + v, col_coords + u]),
                         mode='nearest')

    # build an RGB image with the unregistered sequence
    seq_im = np.zeros((nr, nc, 3))
    seq_im[..., 0] = image1
    seq_im[..., 1] = image0
    seq_im[..., 2] = image0

    # build an RGB image with the registered sequence
    reg_im = np.zeros((nr, nc, 3))
    reg_im[..., 0] = image1_warp_grey
    reg_im[..., 1] = image0
    reg_im[..., 2] = image0

    # build an RGB image with the all three independent channels
    reg_im_rgb = np.zeros((nr, nc, 3))
    reg_im_rgb[..., 0] = image1_warp_r
    reg_im_rgb[..., 1] = image1_warp_g
    reg_im_rgb[..., 2] = image1_warp_b

    # build an RGB image from the target (reference) frame only
    target_im = np.zeros((nr, nc, 3))
    target_im[..., 0] = image0
    target_im[..., 1] = image0
    target_im[..., 2] = image0

    # --- Show the result

    fig, ((ax0, ax1, ax2), (ax3, ax4, ax5)) = plt.subplots(2,
                                                           3,
                                                           figsize=(5, 10))

    ax0.imshow(image0_rgb)
    ax0.set_title("Image t0", fontsize=10)
    ax0.set_axis_off()

    ax1.imshow(image1_rgb)
    ax1.set_title("Image t1", fontsize=10)
    ax1.set_axis_off()

    ax2.imshow(reg_im_rgb)
    ax2.set_title("Registered seq RGB", fontsize=10)
    ax2.set_axis_off()

    ax4.imshow(seq_im)
    ax4.set_title("Unregistered sequence", fontsize=10)
    ax4.set_axis_off()

    ax3.imshow(reg_im)
    ax3.set_title("Registered seq", fontsize=10)
    ax3.set_axis_off()

    fig.tight_layout()
    plt.show()