import numpy as np
import tensorflow as tf

# HomographyModel, TensorDLT and transformer are assumed to be provided by the
# project's own modules (regression network, tensor DLT, spatial transformer).


def Unsupervised_HomographyModel(Img, C4A, I2, I1Full, ImageSize,
                                 MiniBatchSize):

    # Predict the 4-point homography parameterization from the stacked input
    H4pt = HomographyModel(Img, ImageSize, MiniBatchSize)
    # Reshape patch A's corners from [MiniBatchSize, 4, 2] to [MiniBatchSize, 8]
    C4A_pts = tf.reshape(C4A, [MiniBatchSize, 8])
    # Recover the full 3x3 homography from the 4-point parameterization
    H_mat = TensorDLT(H4pt, C4A_pts, MiniBatchSize)
    img_h = 128
    img_w = 128
    # Constants and variables used for spatial transformer
    M = np.array([[img_w / 2.0,
                   0., img_w / 2.0], [0., img_h / 2.0, img_h / 2.0],
                  [0., 0., 1.]]).astype(np.float32)

    M_tensor = tf.constant(M, tf.float32)
    M_tile = tf.tile(tf.expand_dims(M_tensor, [0]), [MiniBatchSize, 1, 1])
    # Inverse of M
    M_inv = np.linalg.inv(M)
    M_tensor_inv = tf.constant(M_inv, tf.float32)
    M_tile_inv = tf.tile(tf.expand_dims(M_tensor_inv, [0]),
                         [MiniBatchSize, 1, 1])

    # Per-image offsets for indexing into the flattened batch of warped images
    # (only needed by the commented-out patch-gathering path below)
    y_t = tf.range(0, MiniBatchSize * img_w * img_h, img_w * img_h)
    z = tf.tile(tf.expand_dims(y_t, 1), [1, img_w * img_h])
    batch_indices_tensor = tf.reshape(
        z, [-1]
    )  # Add these values to patch_indices_batch[i] for i in range(num_pairs) # [BATCH_SIZE*WIDTH*HEIGHT]

    # Transform H_mat since we scale image indices in transformer
    H_mat = tf.matmul(tf.matmul(M_tile_inv, H_mat), M_tile)
    # Transform image 1 (large image) to image 2
    out_size = (img_h, img_w)

    # Slice out the first channel of the stacked input (unused below)
    I1 = tf.slice(Img, [0, 0, 0, 0], [MiniBatchSize, 128, 128, 1])
    warped_images, _ = transformer(I2, H_mat, out_size)
    # print(warped_images.get_shape())
    warped_gray_images = tf.reduce_mean(warped_images, 3)
    # warped_images_flat = tf.reshape(warped_gray_images, [-1])

    # x_t_flat, y_t_flat = get_mesh_grid_per_img(128, 128)
    # print(C4A.get_shape())
    # print(warped_images_flat.get_shape())
    # x_start_tf = C4A[0] # 1,
    # print(x_start_tf)
    # y_start_tf = C4A[1]  # (1, )
    # patch_indices_tf = (y_t_flat + y_start_tf)*img_w + (x_t_flat + x_start_tf)
    # patch_indices = tf.cast(patch_indices_tf, tf.int32)
    # patch_indices_flat = tf.reshape(patch_indices, [-1])
    # pixel_indices =  patch_indices_flat + batch_indices_tensor
    # pred_I2_flat = tf.gather(warped_images_flat, pixel_indices)

    # # pred_I2_flat = tf.gather(warped_images_flat, batch_indices_tensor)

    # The whole warped grayscale image is used directly as the prediction;
    # the patch-gathering path above is left commented out
    pred_I2 = tf.reshape(warped_gray_images, [MiniBatchSize, 128, 128, 1])

    return pred_I2, I2
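
# Added illustration (not part of the original example): why H_mat is
# conjugated as M_inv * H * M before transformer(). The spatial transformer
# samples with normalized coordinates in [-1, 1], while TensorDLT returns a
# homography in pixel coordinates; M maps normalized to pixel coordinates and
# M_inv maps back. A quick numpy check with hypothetical values:
def _check_homography_normalization():
    h = w = 128
    M = np.array([[w / 2.0, 0., w / 2.0],
                  [0., h / 2.0, h / 2.0],
                  [0., 0., 1.]], dtype=np.float64)
    M_inv = np.linalg.inv(M)

    H_pix = np.array([[1.02, 0.01, 3.0],   # some pixel-space homography
                      [0.00, 0.98, -2.0],
                      [0.001, 0.0, 1.0]])
    H_norm = M_inv @ H_pix @ M              # what the code feeds to transformer()

    p_pix = np.array([40.0, 70.0, 1.0])     # a pixel coordinate (homogeneous)
    p_norm = M_inv @ p_pix                  # its normalized counterpart

    q_pix = H_pix @ p_pix
    q_norm = M @ (H_norm @ p_norm)
    # Mapping the normalized result back to pixels reproduces the direct
    # pixel-space mapping (up to the projective scale)
    assert np.allclose(q_norm / q_norm[2], q_pix / q_pix[2])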
Example #2
def unsupervised_HomographyNet(patch_batches,
                               corners_a,
                               patch_b,
                               image_a,
                               patch_indices,
                               batch_size=64):

    # Note: corners_a has shape [batch_size, 4, 2]: [[x1, y1], [x2, y2], [x3, y3], [x4, y4]]

    # The batch_size argument is shadowed by the static batch dimension of image_a
    batch_size, h, w, channels = image_a.get_shape().as_list()

    H4_batches = homographyNet(
        patch_batches)  # H4 = [dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4]

    corners_a = tf.reshape(
        corners_a, [batch_size, 8])  # flatten to [batch_size, 8]: [x1, y1, ..., x4, y4]

    H_batches = TensorDLT(H4_batches, corners_a, batch_size)

    # compute M
    M = np.array([[w / 2.0, 0., w / 2.0], [0., h / 2.0, h / 2.0],
                  [0., 0., 1.]]).astype(np.float32)

    tensor_M = tf.constant(M, tf.float32)
    tensor_M = tf.expand_dims(tensor_M, 0)
    M_batches = tf.tile(
        tensor_M, [batch_size, 1, 1])  # make batch_size copies

    # compute M_inv
    M_inv = np.linalg.inv(M)
    tensor_M_inv = tf.constant(M_inv, tf.float32)
    tensor_M_inv = tf.expand_dims(tensor_M_inv, 0)
    M_inv_batches = tf.tile(
        tensor_M_inv, [batch_size, 1, 1])  # make batch_size copies

    H_scaled = tf.matmul(tf.matmul(M_inv_batches, H_batches), M_batches)

    #     Pa = tf.slice(patch_batches,[0,0,0,0],[batch_size,128,128,1])

    warped_Ia, _ = transformer(image_a, H_scaled, (h, w))

    # image_a is assumed to be single-channel, so the reshape drops the channel axis
    warped_Ia = tf.reshape(warped_Ia, [batch_size, h, w])
    # Gather the warped patch out of the full warped image using the
    # precomputed per-pixel patch_indices
    warped_Pa = tf.gather_nd(warped_Ia, patch_indices, name=None, batch_dims=1)

    warped_Pa = tf.transpose(warped_Pa, perm=[0, 2, 1])

    warped_Pa = tf.reshape(warped_Pa, [batch_size, 128, 128, 1])

    return warped_Pa, patch_b, H_batches
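
# Added sketch (an assumption about how these outputs are consumed, not part of
# the original example; unsupervised_loss_sketch is a hypothetical helper): the
# unsupervised objective is typically an L1 photometric loss between the warped
# patch and the real patch B, trained with a standard TF1 optimizer.
def unsupervised_loss_sketch(patch_batches, corners_a, patch_b, image_a,
                             patch_indices):
    warped_Pa, patch_b, H_batches = unsupervised_HomographyNet(
        patch_batches, corners_a, patch_b, image_a, patch_indices)
    photometric_loss = tf.reduce_mean(tf.abs(warped_Pa - patch_b))
    train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(
        photometric_loss)
    return photometric_loss, train_op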
Example #3
def HomographyModel_unsup(Img, C4A, I2, MiniBatchSize):

    H4pt = HomographyModel_sup(Img)

    # reshape the corners from [MiniBatchSize, 4, 2] to [MiniBatchSize, 8]
    C4A_pts = tf.reshape(C4A, [MiniBatchSize, 8])

    # compute the homography via TensorDLT
    H_mat = TensorDLT(H4pt, C4A_pts, MiniBatchSize)

    # define the image width and height
    img_h = 128
    img_w = 128

    # Constants and variables used for spatial transformer
    M = np.array([[img_w / 2.0,
                   0., img_w / 2.0], [0., img_h / 2.0, img_h / 2.0],
                  [0., 0., 1.]]).astype(np.float32)

    M_tensor = tf.constant(M, tf.float32)
    M_tile = tf.tile(tf.expand_dims(M_tensor, [0]), [MiniBatchSize, 1, 1])

    # Inverse of M
    M_inv = np.linalg.inv(M)
    M_tensor_inv = tf.constant(M_inv, tf.float32)
    M_tile_inv = tf.tile(tf.expand_dims(M_tensor_inv, [0]),
                         [MiniBatchSize, 1, 1])

    # Per-image offsets for indexing into the flattened batch of warped images
    # (not used further in this variant)
    y_t = tf.range(0, MiniBatchSize * img_w * img_h, img_w * img_h)
    z = tf.tile(tf.expand_dims(y_t, 1), [1, img_w * img_h])
    batch_indices_tensor = tf.reshape(
        z, [-1]
    )  # Add these values to patch_indices_batch[i] for i in range(num_pairs) # [BATCH_SIZE*WIDTH*HEIGHT]

    # Transform H_mat since we scale image indices in transformer
    H_mat = tf.matmul(tf.matmul(M_tile_inv, H_mat), M_tile)
    # Transform image 1 (large image) to image 2
    out_size = (img_h, img_w)

    # Slice out the first channel of the stacked input (unused below)
    I1 = tf.slice(Img, [0, 0, 0, 0], [MiniBatchSize, 128, 128, 1])
    warped_images, _ = transformer(I2, H_mat, out_size)
    # print(warped_images.get_shape())

    warped_gray_images = tf.reduce_mean(warped_images, 3)
    pred_I2 = tf.reshape(warped_gray_images, [MiniBatchSize, 128, 128, 1])

    return pred_I2, I2
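
# Added sketch (not part of the original example): what TensorDLT computes,
# written per-sample in numpy. The project's TensorDLT is assumed to be the
# batched TensorFlow version of this 4-point direct linear transform with the
# bottom-right homography entry fixed to 1. dlt_from_4pt is a hypothetical
# helper name.
def dlt_from_4pt(corners_a, h4pt):
    """corners_a: (4, 2) patch corner coordinates; h4pt: (8,) corner offsets."""
    corners_a = np.asarray(corners_a, np.float64).reshape(4, 2)
    corners_b = corners_a + np.asarray(h4pt, np.float64).reshape(4, 2)
    A, b = [], []
    for (x, y), (u, v) in zip(corners_a, corners_b):
        # u = (h1*x + h2*y + h3) / (h7*x + h8*y + 1), and likewise for v
        A.append([x, y, 1, 0, 0, 0, -x * u, -y * u])
        b.append(u)
        A.append([0, 0, 0, x, y, 1, -x * v, -y * v])
        b.append(v)
    h = np.linalg.solve(np.array(A), np.array(b))
    return np.append(h, 1.0).reshape(3, 3)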
Example #4
    def transform(self):
        # Transform H_mat since we scale image indices in transformer
        H_mat = tf.matmul(tf.matmul(self.M_tile_inv, self.H_mat), self.M_tile)
        # Transform image 1 (large image) to image 2
        out_size = (self.imgH, self.imgW)
        I12 = tf.slice(self.stackedData, [0, 0, 0, 0],
                       [self.batchSize, 128, 128, 1])
        warped_images, _ = transformer(I12, H_mat, out_size)
        # TODO: warp image 2 to image 1

        warped_gray_images = tf.reduce_mean(warped_images, 3)
        #        warped_images_flat = tf.reshape(warped_gray_images, [-1])
        #        self.getPatches()
        #        pixel_indices =  self.patch_indices_flat + self.batch_indices_tensor
        #        pred_I2_flat = tf.gather(warped_images_flat, pixel_indices)
        self.pred_I2 = tf.reshape(warped_gray_images,
                                  [self.batchSize, 128, 128, 1])
Example #5
def transform(ImageSize, HMat, MiniBatchSize, I1):
    M = np.array([[ImageSize[1] / 2.0, 0., ImageSize[1] / 2.0],
                  [0., ImageSize[0] / 2.0, ImageSize[0] / 2.0],
                  [0., 0., 1.]]).astype(np.float32)
    M_tensor = tf.constant(M, tf.float32)
    M_tile = tf.tile(tf.expand_dims(M_tensor, [0]), [MiniBatchSize, 1, 1])
    # Inverse of M
    M_inv = np.linalg.inv(M)
    M_tensor_inv = tf.constant(M_inv, tf.float32)
    M_tile_inv = tf.tile(tf.expand_dims(M_tensor_inv, [0]),
                         [MiniBatchSize, 1, 1])
    # Transform H_mat since we scale image indices in transformer
    H_mat = tf.matmul(tf.matmul(M_tile_inv, HMat), M_tile)
    # H_flat = tf.reshape(H_mat, [-1,9])
    # H_flatExcLastElem = H_flat[:, 0:8]
    # Transform image 1 (large image) to image 2
    out_size = (ImageSize[0], ImageSize[1])
    # warped_image = tf.contrib.image.transform(I1, H_flatExcLastElem, interpolation='BILINEAR', output_shape=None, name=None)
    warped_image, _ = transformer(I1, H_mat, out_size)
    # TODO: warp image 2 to image 1

    # # width = ImageSize[1]; height = ImageSize[0]
    #    y_t = tf.range(0, MiniBatchSize*ImageSize[1]*ImageSize[0], ImageSize[1]*ImageSize[0])
    #    z =  tf.tile(tf.expand_dims(y_t,[1]),[1,PatchSize[1]*PatchSize[0]])
    #    batch_indices_tensor = tf.reshape(z, [-1]) # Add these value to patch_indices_batch[i] for i in range(num_pairs) # [BATCH_SIZE*WIDTH*HEIGHT]

    # # Extract the warped patch from warped_images by flatting the whole batch before using indices
    # # Note that input I  is  3 channels so we reduce to gray
    # warped_gray_images = tf.reduce_mean(warped_image, 3)
    # warped_images_flat = tf.reshape(warped_gray_images, [-1])
    # patch_indices_flat = tf.reshape(self.patch_indices, [-1])
    # pixel_indices =  patch_indices_flat + batch_indices_tensor
    # pred_I2_flat = tf.gather(warped_images_flat, pixel_indices)

    # warped_patch = tf.reshape(pred_I2_flat, [MiniBatchSize, PatchSize[1], PatchSize[0], 1])

    return warped_image
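
# Added usage sketch (names are assumptions, not part of the original example;
# warp_with_predicted_homography is a hypothetical wrapper): transform() is
# meant to receive the pixel-space homography produced by TensorDLT together
# with the full-size source image, mirroring the pipeline of the examples above.
def warp_with_predicted_homography(Img, C4A, I1Full, ImageSize, MiniBatchSize):
    H4pt = HomographyModel(Img, ImageSize, MiniBatchSize)
    C4A_pts = tf.reshape(C4A, [MiniBatchSize, 8])
    H_mat = TensorDLT(H4pt, C4A_pts, MiniBatchSize)
    # ImageSize is (height, width) as indexed inside transform()
    return transform(ImageSize, H_mat, MiniBatchSize, I1Full)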