Example no. 1
def _col_kernel(upsampled_region_size, upsample_factor, axis_offsets,
                data_shape):

    data_shape_float = tf.cast(data_shape, tf.float32)
    col_constant = tf.cast(data_shape_float[2] * upsample_factor, tf.complex64)
    col_constant = (-1j * 2 * np.pi / col_constant)

    col_kernel_a = tf.range(0, data_shape_float[2], dtype=tf.float32)
    col_kernel_a = fftshift1d(col_kernel_a)
    col_kernel_a = tf.reshape(col_kernel_a, (-1, 1))
    col_kernel_a -= tf.floor(data_shape_float[2] / 2.)
    col_kernel_a = tf.reshape(col_kernel_a, (1, -1))
    col_kernel_a = tf.tile(col_kernel_a, (data_shape[0], 1))

    col_kernel_b = tf.range(0, upsampled_region_size, dtype=tf.float32)
    col_kernel_b = tf.reshape(col_kernel_b, (1, -1))
    col_kernel_b = tf.tile(col_kernel_b, (data_shape[0], 1))
    col_kernel_b = tf.transpose(col_kernel_b)
    col_kernel_b -= tf.transpose(axis_offsets[:, 1])
    col_kernel_b = tf.transpose(col_kernel_b)

    col_kernel_a = tf.expand_dims(col_kernel_a, 1)
    col_kernel_b = tf.expand_dims(col_kernel_b, -1)

    col_kernel = col_kernel_a * col_kernel_b
    col_kernel = tf.transpose(col_kernel, perm=(0, 2, 1))
    col_kernel = col_constant * tf.cast(col_kernel, tf.complex64)
    col_kernel = tf.exp(col_kernel)
    return col_kernel
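A minimal shape sketch for _col_kernel, assuming TensorFlow 1.x and that the companion fftshift1d helper from the same module is in scope; the sizes below are illustrative only. The function builds the per-image column kernel of the matrix-multiply (upsampled) DFT used for subpixel registration.

import tensorflow as tf

axis_offsets = tf.zeros((4, 2))  # illustrative per-image (row, col) offsets of the upsampled window
kernel = _col_kernel(upsampled_region_size=15, upsample_factor=10.,
                     axis_offsets=axis_offsets, data_shape=(4, 64, 64))
# kernel is complex64 with shape (4, 64, 15): one (n_cols x region) kernel per image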
Example no. 2
def _row_kernel(upsampled_region_size, upsample_factor, axis_offsets,
                data_shape):

    data_shape_float = tf.cast(data_shape, tf.float32)
    row_constant = tf.cast(data_shape_float[1] * upsample_factor, tf.complex64)
    row_constant = (-1j * 2 * np.pi / row_constant)

    row_kernel_a = tf.range(0, upsampled_region_size, dtype=tf.float32)
    row_kernel_a = tf.reshape(row_kernel_a, (1, -1))
    row_kernel_a = tf.tile(row_kernel_a, (data_shape[0], 1))
    row_kernel_a = tf.transpose(row_kernel_a)
    row_kernel_a = row_kernel_a - axis_offsets[:, 0]

    row_kernel_b = tf.range(0, data_shape_float[1], dtype=tf.float32)
    row_kernel_b = fftshift1d(row_kernel_b)
    row_kernel_b = tf.reshape(row_kernel_b, (1, -1))
    row_kernel_b = tf.tile(row_kernel_b, (data_shape[0], 1))
    row_kernel_b = row_kernel_b - tf.floor(data_shape_float[1] / 2.)

    row_kernel_a = tf.expand_dims(row_kernel_a, 1)
    row_kernel_b = tf.expand_dims(row_kernel_b, -1)

    row_kernel = tf.transpose(row_kernel_a) * row_kernel_b
    row_kernel = tf.transpose(row_kernel, perm=(0, 2, 1))
    row_kernel = row_constant * tf.cast(row_kernel, tf.complex64)

    row_kernel = tf.exp(row_kernel)

    return row_kernel
Example no. 3
    def call(self, inputs):

        #Use masks
        if len(inputs) == 3:
            mask = ktf.transpose(inputs[2], [0, 2, 3, 1])
            mask = ktf.image.resize_images(
                mask,
                self.image_size[:2],
                method=ktf.image.ResizeMethod.NEAREST_NEIGHBOR)

        masks = []
        for channel in range(mask.shape[3]):
            masks.append(mask[:, :, :, channel])
        mask = K.concatenate(masks, axis=-2)
        mask = ktf.expand_dims(mask, -1)
        mask = ktf.expand_dims(mask, -1)
        multiples = [1, 1, 1, inputs[0].shape[3], 1]
        mask = ktf.tile(mask, multiples=multiples)

        expanded_tensor = ktf.expand_dims(inputs[0], -1)
        multiples = [1, self.number_of_transforms, 1, 1, 1]
        tiled_tensor = ktf.tile(expanded_tensor, multiples=multiples)
        tiled_tensor = tiled_tensor * mask
        repeated_tensor = ktf.reshape(
            tiled_tensor,
            ktf.shape(inputs[0]) *
            np.array([self.number_of_transforms, 1, 1, 1]))

        affine_transforms = inputs[1] / self.affine_mul

        affine_transforms = ktf.reshape(affine_transforms, (-1, 8))
        transformed = tf_affine_transform(repeated_tensor, affine_transforms)
        res = ktf.reshape(transformed,
                          [-1, self.number_of_transforms] + self.image_size)
        res = ktf.transpose(res, [0, 2, 3, 1, 4])

        if self.aggregation_fn == 'none':
            res = ktf.reshape(res, [-1] + self.image_size[:2] +
                              [self.image_size[2] * self.number_of_transforms])
        elif self.aggregation_fn == 'max':
            res = ktf.reduce_max(res, reduction_indices=[-2])
        elif self.aggregation_fn == 'avg':
            if len(inputs) == 3:
                mask = ktf.transpose(inputs[2], [0, 2, 3, 1])
                mask = ktf.image.resize_images(
                    mask,
                    self.image_size[:2],
                    method=ktf.image.ResizeMethod.NEAREST_NEIGHBOR)
                res = res * ktf.expand_dims(mask, axis=-1)
            counts = ktf.reduce_sum(mask, reduction_indices=[-1])
            counts = ktf.expand_dims(counts, axis=-1)
            res = ktf.reduce_sum(res, reduction_indices=[-2])
            res /= counts
            res = ktf.where(ktf.is_nan(res), ktf.zeros_like(res), res)
        return res
Example no. 4
def repeat_theta(theta, n_angles, n_frames):

    repeated = tf.reshape(theta, (1, n_angles))
    repeated = tf.tile(repeated, (n_frames, 1))
    repeated = tf.reshape(repeated, (n_frames * n_angles, ))

    return repeated
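A minimal usage sketch for repeat_theta, assuming TensorFlow 1.x with a session; the values are illustrative only.

import numpy as np
import tensorflow as tf

theta = tf.constant(np.linspace(0., np.pi, 4, endpoint=False), dtype=tf.float32)
angles = repeat_theta(theta, n_angles=4, n_frames=2)   # shape (8,)
with tf.Session() as sess:
    print(sess.run(angles))  # the full set of 4 angles, repeated once per frame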
Example no. 5
def radon_transform(x, theta):

    x = tf.cast(x, dtype=tf.float32)

    x_shape = tf.shape(x)
    n_cols = x_shape[2]
    n_rows = x_shape[1]
    n_frames = x_shape[0]
    n_angles = tf.shape(theta)[0]

    x = tf.reshape(x, (-1, 1, n_rows, n_cols, 1))
    x = tf.tile(x, (1, n_angles, 1, 1, 1))
    x = tf.reshape(x, (-1, n_rows, n_cols, 1))

    repeated_theta = repeat_theta(theta, n_angles, n_frames)

    x = tf.cast(x, dtype=tf.uint8)
    #x = tf.contrib.image.rotate(x, repeated_theta, interpolation='BILINEAR')
    x = tf.cast(x, dtype=tf.float32)

    x = tf.reshape(x, (-1, n_angles, n_rows, n_cols, 1))
    x = tf.cast(x, dtype=tf.float32)
    x = tf.reduce_sum(x, 2)

    return x
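A minimal shape sketch, assuming TensorFlow 1.x; note that the rotation step is commented out in the snippet above, so this only illustrates the expected shapes.

import numpy as np
import tensorflow as tf

frames = tf.random_uniform((2, 32, 32, 1))   # illustrative batch of 2 frames
theta = tf.constant(np.linspace(0., np.pi, 16, endpoint=False), dtype=tf.float32)
sinogram = radon_transform(frames, theta)
# sinogram has shape (2, 16, 32, 1): one projection (sum over rows) per angle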
Example no. 6
def degree_matrix(A, return_sparse_batch=False):
    """
    Computes the degree matrix of A, deals with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :param return_sparse_batch: if operating in batch mode, return a
    SparseTensor. Note that the sparse degree tensor returned by this function
    cannot be used for sparse matrix multiplication afterwards.
    :return: SparseTensor of rank k.
    """
    D = degrees(A)

    batch_mode = K.ndim(D) == 2
    N = tf.shape(D)[-1]
    batch_size = tf.shape(D)[0] if batch_mode else 1

    inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
    if batch_mode:
        if return_sparse_batch:
            outer_index = tf_repeat_1d(
                tf.range(batch_size),
                tf.ones(batch_size) * tf.cast(N, tf.float32))
            indices = tf.concat([outer_index[:, None], inner_index], 1)
            dense_shape = (batch_size, N, N)
        else:
            return tf.linalg.diag(D)
    else:
        indices = inner_index
        dense_shape = (N, N)

    indices = tf.cast(indices, tf.int64)
    values = tf.reshape(D, (-1, ))
    return tf.SparseTensor(indices, values, dense_shape)
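A minimal usage sketch for degree_matrix in dense batch mode, assuming TensorFlow 1.x and that the companion degrees helper from the same module (which returns the node degree vectors) is in scope; the adjacency values are illustrative only.

import tensorflow as tf

A = tf.constant([[[0., 1., 1.],
                  [1., 0., 0.],
                  [1., 0., 0.]],
                 [[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]]])     # batch of two 3-node graphs
D = degree_matrix(A)                  # dense (2, 3, 3) batch of diagonal degree matrices
with tf.Session() as sess:
    print(sess.run(D))                # diagonals [2., 1., 1.] and [1., 2., 1.]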
Example no. 7
def batch_map_offsets(input, offsets, order=1):
    """Batch map offsets into input
    Adds index of every entry to the entry to make it's interpolation
    relevant to it's location
    """

    offset_shape = offsets.get_shape()
    batch_size = tf.shape(offsets)[0]

    input_h = offset_shape[1]
    input_w = offset_shape[2]

    channel_size = int(offset_shape[3].value)
    #offsets = tf.reshape(offsets, (batch_size, -1, 2))
    #################### DEFAULT COORDINATES FOR EVERY POINT ####################
    ind_add = tf.meshgrid(tf.range(1, input_h + 1, delta=1),
                          tf.range(1, input_w + 1, delta=1),
                          indexing='ij')
    ind_add = tf.stack(ind_add, axis=-1)
    ind_add = tf.cast(ind_add, 'float32')
    ind_add = tf.reshape(ind_add, (1, input_h, input_w, 2))
    ind_add = tf.tile(ind_add, [batch_size, 1, 1, int(channel_size / 2)])
    #############################################################################

    #################### KERNEL OFFSET FOR EVERY POINT ####################
    ind_zero = tf.meshgrid(tf.range(-1, 2, delta=1),
                           tf.range(-1, 2, delta=1),
                           indexing='ij')
    ind_zero = tf.stack(ind_zero, axis=-1)
    ind_zero = tf.cast(ind_zero, 'float32')
    ind_zero = tf.reshape(ind_zero, (1, 1, 1, channel_size))
    ind_zero = tf.tile(ind_zero, [batch_size, input_h, input_w, 1])
    #######################################################################

    coords = offsets + ind_add + ind_zero

    int_vals = batch_map_coordinates(input, coords, int(channel_size / 2))
    return int_vals
Example no. 8
def top_k(scores, I, ratio, top_k_var):
    """
    Returns indices to get the top K values in `scores` segment-wise, with
    segments defined by I. K is not fixed, but it is defined as a ratio of the
    number of elements in each segment.
    :param scores: a rank 1 tensor with scores;
    :param I: a rank 1 tensor with segment IDs;
    :param ratio: float, ratio of elements to keep for each segment;
    :param top_k_var: a tf.Variable without shape validation (e.g.,
    `tf.Variable(0.0, validate_shape=False)`);
    :return: a rank 1 tensor containing the indices to get the top K values of
    each segment in `scores`.
    """
    num_nodes = tf.segment_sum(tf.ones_like(I),
                               I)  # Number of nodes in each graph
    cumsum = tf.cumsum(num_nodes)  # Cumulative number of nodes (A, A+B, A+B+C)
    cumsum_start = cumsum - num_nodes  # Start index of each graph
    n_graphs = tf.shape(num_nodes)[0]  # Number of graphs in batch
    max_n_nodes = tf.reduce_max(num_nodes)  # Order of biggest graph in batch
    batch_n_nodes = tf.shape(I)[0]  # Number of overall nodes in batch
    to_keep = tf.ceil(ratio * tf.cast(num_nodes, tf.float32))
    to_keep = tf.cast(to_keep, tf.int32)  # Nodes to keep in each graph

    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes)

    y_min = tf.reduce_min(scores)
    dense_y = tf.ones((n_graphs * max_n_nodes, ))
    dense_y = dense_y * tf.cast(
        y_min - 1, tf.float32
    )  # subtract 1 to ensure that filler values do not get picked
    dense_y = tf.assign(
        top_k_var, dense_y, validate_shape=False
    )  # top_k_var is a variable with unknown shape defined elsewhere
    dense_y = tf.scatter_update(dense_y, index, scores)
    dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes))

    perm = tf.argsort(dense_y, direction='DESCENDING')
    perm = perm + cumsum_start[:, None]
    perm = tf.reshape(perm, (-1, ))

    to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs, ))
    rep_times = tf.reshape(
        tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1),
        (-1, ))
    mask = tf_repeat_1d(to_rep, rep_times)

    perm = tf.boolean_mask(perm, mask)

    return perm
Example no. 9
def tf_repeat_1d(x, repeats):
    """
    Repeats each value `x[i]` a number of times `repeats[i]`.
    :param x: a rank 1 tensor;
    :param repeats: a rank 1 tensor;
    :return: a rank 1 tensor, of shape `(sum(repeats), )`.
    """
    x = tf.expand_dims(x, 1)
    max_repeats = tf.reduce_max(repeats)
    tile_repeats = [1, max_repeats]
    arr_tiled = tf.tile(x, tile_repeats)
    mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1))
    result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1])
    return result
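A minimal usage sketch for tf_repeat_1d, assuming TensorFlow 1.x with a session; the values are illustrative only.

import tensorflow as tf

x = tf.constant([3., 5., 7.])
repeats = tf.constant([1, 2, 3])
out = tf_repeat_1d(x, repeats)
with tf.Session() as sess:
    print(sess.run(out))  # [3. 5. 5. 7. 7. 7.]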
Example no. 10
def batch_map_coordinates(input, coords, order=1):
    """Batch version of tf_map_coordinates"""

    input_shape = tf.shape(input)
    batch_size = input_shape[0]
    input_size = input_shape[1]

    #coords = tf.reshape(coords, (batch_size, -1, 2))

    n_coords = tf.shape(coords)[1]

    coords = tf.clip_by_value(coords, 0, tf.cast(input_size, 'float32') - 1)

    coords_tl = tf.cast(tf.floor(coords), 'int32')
    coords_br = tf.cast(tf.ceil(coords), 'int32')
    coords_bl = tf.stack([coords_tl[..., 0], coords_br[..., 1]], axis=-1)
    coords_tr = tf.stack([coords_br[..., 0], coords_tl[..., 1]], axis=-1)

    idx = tf.range(batch_size)
    idx = tf.expand_dims(idx, -1)
    idx = tf.tile(idx, [1, n_coords])
    idx = tf.reshape(idx, [-1])

    def _get_vals_by_coords(input, coords):
        coords_0_flat = tf.reshape(coords[..., 0], [-1])
        coords_1_flat = tf.reshape(coords[..., 1], [-1])
        indices = tf.stack([idx, coords_0_flat, coords_1_flat], axis=-1)
        vals = tf.gather_nd(input, indices)
        vals = tf.reshape(vals, (batch_size, n_coords))
        return vals

    vals_tl = _get_vals_by_coords(input, coords_tl)
    vals_br = _get_vals_by_coords(input, coords_br)
    vals_bl = _get_vals_by_coords(input, coords_bl)
    vals_tr = _get_vals_by_coords(input, coords_tr)

    h_offset = coords[..., 0] - tf.cast(coords_tl[..., 0], tf.float32)

    h_int_t = (((1.0 - h_offset) * vals_tl) + (h_offset * vals_tr))
    h_int_b = (((1.0 - h_offset) * vals_bl) + (h_offset * vals_br))

    v_offset = coords[..., 1] - tf.cast(coords_tl[..., 1], tf.float32)

    int_vals = (((1.0 - v_offset) * h_int_t) + (v_offset * h_int_b))

    return int_vals
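A minimal usage sketch for batch_map_coordinates, assuming TensorFlow 1.x with a session; it samples one small image at fractional (row, col) positions with bilinear interpolation. The values are illustrative only.

import tensorflow as tf

img = tf.constant([[[0., 1., 2.],
                    [3., 4., 5.],
                    [6., 7., 8.]]])               # shape (1, 3, 3)
coords = tf.constant([[[0.5, 0.5], [1.0, 2.0]]])  # shape (1, 2, 2): (row, col) pairs
vals = batch_map_coordinates(img, coords)
with tf.Session() as sess:
    print(sess.run(vals))  # [[2. 5.]] - bilinear value at (0.5, 0.5) and exact value at (1, 2)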
Example no. 11
def _upsampled_registration(target_image, src_image, upsample_factor):

    upsample_factor = tf.constant(upsample_factor, tf.float32)

    target_shape = tf.shape(target_image)
    target_image = tf.reshape(target_image, target_shape[:3])
    src_shape = tf.shape(src_image)
    src_image = tf.reshape(src_image, src_shape[:3])

    src_freq = fft2d(src_image)
    target_freq = fft2d(target_image)

    shape = tf.reshape(tf.shape(src_freq)[1:3], (1, 2))
    shape = tf.cast(shape, tf.float32)
    shape = tf.tile(shape, (tf.shape(target_freq)[0], 1))
    image_product = src_freq * tf.conj(target_freq)
    cross_correlation = tf.spectral.ifft2d(image_product)

    maxima = find_maxima(tf.abs(cross_correlation))
    midpoints = fix(tf.cast(shape, tf.float32) / 2.)

    shifts = maxima
    shifts = tf.where(shifts > midpoints, shifts - shape, shifts)
    shifts = tf.round(shifts * upsample_factor) / upsample_factor

    upsampled_region_size = tf.ceil(upsample_factor * 1.5)
    dftshift = fix(upsampled_region_size / 2.0)
    normalization = tf.cast(tf.size(src_freq[0]), tf.float32)
    normalization *= upsample_factor**2
    sample_region_offset = dftshift - shifts * upsample_factor

    data = tf.conj(image_product)
    upsampled_dft = _upsampled_dft(data, upsampled_region_size,
                                   upsample_factor, sample_region_offset)

    cross_correlation = tf.conj(upsampled_dft)
    cross_correlation /= tf.cast(normalization, tf.complex64)
    cross_correlation = tf.abs(cross_correlation)

    maxima = find_maxima(cross_correlation)
    maxima = maxima - dftshift
    shifts = shifts + maxima / upsample_factor

    return shifts
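A minimal graph-construction sketch, assuming TensorFlow 1.x and that the companion helpers used above (fft2d, find_maxima, fix, _upsampled_dft) are in scope; shapes and the upsample factor are illustrative only.

import tensorflow as tf

target = tf.placeholder(tf.float32, (None, 64, 64, 1))
source = tf.placeholder(tf.float32, (None, 64, 64, 1))
shifts = _upsampled_registration(target, source, upsample_factor=20)
# shifts has shape (batch, 2): the estimated sub-pixel (row, col) shift between
# each pair of frames, to a precision of 1/20 pixel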
Example no. 12
    def call(self, inputs):
        expanded_tensor = ktf.expand_dims(inputs[0], -1)
        print('expanded_tensor:', expanded_tensor.shape)
        multiples = [1, self.number_of_transforms, 1, 1, 1]
        tiled_tensor = ktf.tile(expanded_tensor, multiples=multiples)
        print('tiled_tensor:', tiled_tensor.shape)
        repeated_tensor = ktf.reshape(
            tiled_tensor,
            ktf.shape(inputs[0]) *
            np.array([self.number_of_transforms, 1, 1, 1]))
        print('repeated_tensor:', repeated_tensor.shape)

        perspective_transforms = inputs[1] / self.perspective_mul

        perspective_transforms = ktf.reshape(perspective_transforms, (-1, 8))
        transformed = tf_perspective_transform(repeated_tensor,
                                               perspective_transforms)
        res = ktf.reshape(transformed,
                          [-1, self.number_of_transforms] + self.image_size)
        res = ktf.transpose(res, [0, 2, 3, 1, 4])

        #Use masks
        if len(inputs) == 3:
            mask = ktf.transpose(inputs[2], [0, 2, 3, 1])
            mask = ktf.image.resize_images(
                mask,
                self.image_size[:2],
                method=ktf.image.ResizeMethod.NEAREST_NEIGHBOR)
            res = res * ktf.expand_dims(mask, axis=-1)

        if self.aggregation_fn == 'none':
            res = ktf.reshape(res, [-1] + self.image_size[:2] +
                              [self.image_size[2] * self.number_of_transforms])
        elif self.aggregation_fn == 'max':
            res = ktf.reduce_max(res, reduction_indices=[-2])
        elif self.aggregation_fn == 'avg':
            counts = ktf.reduce_sum(mask, reduction_indices=[-1])
            counts = ktf.expand_dims(counts, axis=-1)
            res = ktf.reduce_sum(res, reduction_indices=[-2])
            res /= counts
            res = ktf.where(ktf.is_nan(res), ktf.zeros_like(res), res)
        return res
Example no. 13
def batch_map_offsets(input, offsets, order=1):
    """Batch map offsets into input
    Adds index of every entry to the entry to make it's interpolation
    relevant to it's location
    """

    input_shape = tf.shape(input)
    batch_size = input_shape[0]
    input_w = input_shape[1]
    input_h = input_shape[2]
    offsets = tf.reshape(offsets, (batch_size, -1, 2))

    ind_add = tf.meshgrid(tf.range(input_w), tf.range(input_h), indexing='ij')
    ind_add = tf.stack(ind_add, axis=-1)
    ind_add = tf.cast(ind_add, 'float32')
    ind_add = tf.reshape(ind_add, (-1, 2))
    ind_add = tf.expand_dims(ind_add, 0)
    ind_add = tf.tile(ind_add, [batch_size, 1, 1])

    coords = offsets + ind_add

    int_vals = batch_map_coordinates(input, coords)
    return int_vals
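A minimal usage sketch, assuming TensorFlow 1.x and the batch_map_coordinates helper shown earlier; the values are illustrative only. Zero offsets simply resample the input at its own pixel grid.

import tensorflow as tf

img = tf.constant([[[0., 1.],
                    [2., 3.]]])       # shape (1, 2, 2)
offsets = tf.zeros((1, 2, 2, 2))      # one (row, col) offset per pixel
vals = batch_map_offsets(img, offsets)
with tf.Session() as sess:
    print(sess.run(vals))  # [[0. 1. 2. 3.]] - the flattened input, unchanged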