Example 1
import tensorflow as tf


def sparse_bool_mask(x, mask, axis=0):
    # Only necessary if indices may have non-unique elements
    indices = tf.boolean_mask(tf.range(tf.shape(x)[axis]), mask)
    n_indices = tf.size(indices)
    # Get indices for the axis
    idx = x.indices[:, axis]
    # Find where indices match the selection
    eq = tf.equal(tf.expand_dims(idx, 1),
                  tf.cast(indices, tf.int64))  # TODO this has quadratic cost
    # Mask for selected values
    sel = tf.reduce_any(eq, axis=1)
    # Selected values
    values_new = tf.boolean_mask(x.values, sel, axis=0)
    # New index value for selected elements
    n_indices = tf.cast(n_indices, tf.int64)
    idx_new = tf.reduce_sum(tf.cast(eq, tf.int64) * tf.range(n_indices),
                            axis=1)
    idx_new = tf.boolean_mask(idx_new, sel, axis=0)
    # New full indices tensor
    indices_new = tf.boolean_mask(x.indices, sel, axis=0)
    indices_new = tf.concat(
        [indices_new[:, :axis],
         tf.expand_dims(idx_new, 1),
         indices_new[:, axis + 1:]],
        axis=1)
    # New shape
    shape_new = tf.concat(
        [x.dense_shape[:axis], [n_indices], x.dense_shape[axis + 1:]], axis=0)
    return tf.SparseTensor(indices_new, values_new, shape_new)
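A quick eager-mode (TF 2.x) check of the function above, keeping rows 0, 2, and 3 of a 4x3 sparse matrix:

import tensorflow as tf

x = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 0], [3, 2]],
                    values=[1., 2., 3., 4.],
                    dense_shape=[4, 3])
mask = tf.constant([True, False, True, True])
y = sparse_bool_mask(x, mask, axis=0)
print(tf.sparse.to_dense(y).numpy())
# [[1. 0. 0.]
#  [3. 0. 0.]
#  [0. 0. 4.]]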
Example 2
def check_angles(x, rotation_guess):
    # Represent each angle and the guess as unit vectors: their dot product
    # is cos(angle - guess), which is negative when the two differ by more
    # than 90 degrees; such angles are flipped by 180 degrees.
    x = tf.reshape(x, (-1, 1))
    x = angle_mod(x)
    rA = radians(x)
    rA = tf.concat([tf.cos(rA), tf.sin(rA)], axis=-1)
    rI = tf.reshape(rotation_guess, (-1, 1))
    rI = radians(rI)
    rI = tf.concat([tf.cos(rI), tf.sin(rI)], axis=-1)
    guess_test = tf.matmul(rA, rI, transpose_b=True)
    x = tf.where(guess_test < 0, angle_mod(x - 180), x)
    return x
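The helpers angle_mod and radians are not shown above; here is a minimal sanity check assuming straightforward implementations of both (the originals may differ):

import math

import tensorflow as tf

def radians(x):
    # Assumed helper: degrees -> radians
    return x * math.pi / 180.0

def angle_mod(x):
    # Assumed helper: wrap angles into [0, 360)
    return tf.math.floormod(x, 360.0)

angles = tf.constant([10., 190., 355.])
# 190 is more than 90 degrees away from the guess of 0, so it flips to 10
print(check_angles(angles, tf.constant([0.])).numpy().ravel())  # [ 10.  10. 355.]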
Example 3
def _find_subpixel_maxima(x,
                          kernel_size,
                          sigma,
                          upsample_factor,
                          coordinate_scale=1,
                          confidence_scale=255.):

    # Gaussian kernel used as the registration template
    kernel = gaussian_kernel_2d(kernel_size, sigma)
    kernel = tf.expand_dims(kernel, 0)

    x_shape = tf.shape(x)
    rows = x_shape[1]
    cols = x_shape[2]

    # Peak value of each map, rescaled into a confidence score
    max_vals = tf.reduce_max(tf.reshape(x, [-1, rows * cols]), axis=1)
    max_vals = tf.reshape(max_vals, [-1, 1]) / confidence_scale

    # Zero-pad the kernel to the spatial size of the input maps
    row_pad = rows // 2 - kernel_size // 2
    col_pad = cols // 2 - kernel_size // 2
    padding = [[0, 0], [row_pad, row_pad - 1], [col_pad, col_pad - 1]]
    kernel = tf.pad(kernel, padding)

    row_center = row_pad + (kernel_size // 2)
    col_center = col_pad + (kernel_size // 2)
    center = tf.stack([row_center, col_center])
    center = tf.expand_dims(center, 0)
    center = tf.cast(center, dtype=tf.float32)

    # Subpixel peak locations from upsampled cross-correlation registration
    shifts = _upsampled_registration(x, kernel, upsample_factor)
    shifts = center - shifts
    shifts *= coordinate_scale
    # Each output row is (x, y, confidence)
    maxima = tf.concat([shifts[:, ::-1], max_vals], -1)

    return maxima
Example 4
def degree_matrix(A, return_sparse_batch=False):
    """
    Computes the degree matrix of A, deals with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k ∈ {2, 3}.
    :param return_sparse_batch: if operating in batch mode, return a
    SparseTensor. Note that the sparse degree tensor returned by this function
    cannot be used for sparse matrix multiplication afterwards.
    :return: SparseTensor of rank k (a dense Tensor in batch mode when
    `return_sparse_batch` is False).
    """
    D = degrees(A)

    batch_mode = K.ndim(D) == 2
    N = tf.shape(D)[-1]
    batch_size = tf.shape(D)[0] if batch_mode else 1

    inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
    if batch_mode:
        if return_sparse_batch:
            outer_index = tf_repeat_1d(
                tf.range(batch_size),
                tf.ones(batch_size) * tf.cast(N, tf.float32))
            indices = tf.concat([outer_index[:, None], inner_index], 1)
            dense_shape = (batch_size, N, N)
        else:
            return tf.linalg.diag(D)
    else:
        indices = inner_index
        dense_shape = (N, N)

    indices = tf.cast(indices, tf.int64)
    values = tf.reshape(D, (-1, ))
    return tf.SparseTensor(indices, values, dense_shape)
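degrees, K, and tf_repeat_1d come from the surrounding module. A minimal single-mode check, assuming degrees computes node degrees as row sums and K is the Keras backend:

import tensorflow as tf
from tensorflow.keras import backend as K

def degrees(A):
    # Assumed helper: node degrees as row sums of the adjacency matrix
    return tf.reduce_sum(A, axis=-1)

A = tf.constant([[0., 1., 1.],
                 [1., 0., 0.],
                 [1., 0., 0.]])
D = degree_matrix(A)
print(tf.sparse.to_dense(D).numpy())  # diag([2., 1., 1.])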
Example 5
import tensorflow as tf


def fftshift1d(x, axis=0):

    x_shape = tf.shape(x)
    x = tf.reshape(x, (-1, 1))
    n_samples = tf.cast(tf.shape(x)[0], tf.float32)
    # True when the number of samples is even
    even = tf.equal(n_samples, tf.round(n_samples / 2.) * 2.)

    def true_fn():
        return x

    def false_fn():
        # Pad odd-length input with one zero so it splits evenly in half
        x_padded = tf.concat([x, tf.zeros((1, 1))], axis=0)
        return x_padded

    x = tf.cond(even, true_fn, false_fn)
    x1, x2 = tf.split(x, 2, axis=axis)

    def true_fn():
        return x2

    def false_fn():
        # Drop the zero padding appended above
        x2_unpadded = x2[:-1]
        return x2_unpadded

    x2 = tf.cond(even, true_fn, false_fn)
    x = tf.concat((x2, x1), axis=axis)
    x = tf.reshape(x, x_shape)

    return x
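The result matches numpy.fft.fftshift for both even and odd lengths; a quick eager-mode check for the odd case:

import numpy as np
import tensorflow as tf

x = tf.constant([0., 1., 2., 3., 4.])
print(fftshift1d(x).numpy())           # [3. 4. 0. 1. 2.]
print(np.fft.fftshift(np.arange(5.)))  # [3. 4. 0. 1. 2.]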
Example 6
    def nn_loss(self, reference, target, neighborhood_size=(3, 3)):
        # For each target pixel, the L1 distance to the best-matching
        # reference pixel within the given neighborhood
        v_pad = neighborhood_size[0] // 2
        h_pad = neighborhood_size[1] // 2
        val_pad = ktf.pad(reference,
                          [[0, 0], [v_pad, v_pad], [h_pad, h_pad], [0, 0]],
                          mode='CONSTANT',
                          constant_values=-10000)

        reference_tensors = []
        for i_begin in range(0, neighborhood_size[0]):
            i_end = i_begin - neighborhood_size[0] + 1
            i_end = None if i_end == 0 else i_end
            for j_begin in range(0, neighborhood_size[1]):
                j_end = j_begin - neighborhood_size[1] + 1
                j_end = None if j_end == 0 else j_end
                sub_tensor = val_pad[:, i_begin:i_end, j_begin:j_end, :]
                reference_tensors.append(ktf.expand_dims(sub_tensor, -1))
        reference = ktf.concat(reference_tensors, axis=-1)
        target = ktf.expand_dims(target, axis=-1)

        abs_diff = ktf.abs(reference - target)
        norms = ktf.reduce_sum(abs_diff, axis=-2)
        loss = ktf.reduce_min(norms, axis=-1)

        return loss
Example 7
    def _max_tile(self, images):
        #num_images = args['num_images']
        num_images = self.batch_size // self.gpus
        im_list = ktf.split(images, num_images, 0)
        # Reduce each chunk to its element-wise maximum over the batch axis
        maxed = [ktf.reduce_max(im, axis=0, keepdims=True) for im in im_list]
        return ktf.concat(maxed, 0)
Example 8
    def call(self, x):
        x_shape = x.get_shape()
        offsets = super(Conv2DOffset, self).call(x)
        #offsets *= 10

        channels = int(offsets.get_shape()[3].value)
        n_batches = tf.shape(offsets)[0]

        # Change the offset order from [x1, x2, ..., y1, y2, ...] to
        # [x1, y1, x2, y2, ...]. The code below exists to reproduce the MXNet
        # implementation's results exactly; removing it does not affect the
        # module's performance.
        ind_shuffle = tf.concat(
            [tf.range(0, channels, 2),
             tf.range(1, channels + 1, 2)], axis=0)

        #ind_shuffle = tf.expand_dims(ind_shuffle, axis=0)
        #ind_shuffle = tf.expand_dims(ind_shuffle, axis=0)
        #ind_shuffle = tf.tile(ind_shuffle, [input_w, input_h, 1])

        offsets = tf.gather(offsets, ind_shuffle, axis=3)
        # ------------------------------------------------------------------------
        #x = tf.transpose(x, [0, 3, 1, 2])
        #x = tf.reshape(x, (-1, int(x_shape[1]), int(x_shape[2])))
        #offsets = tf.resampler(x, offsets)
        offsets = batch_map_offsets(x, offsets)
        #offsets = tf.reshape(x, (-1, int(x_shape[3]), int(x_shape[1]), int(x_shape[2])))
        #offsets = tf.transpose(x, [0, 2, 3, 1])
        offset_shape = offsets.get_shape()
        num_channels = offset_shape[1].value
        height = offset_shape[2].value
        width = offset_shape[3].value
        f_offset = [
            tf.reshape(offsets[..., ind:ind + 3],
                       (-1, num_channels, height, width * 3))
            for ind in range(0, 9, 3)
        ]
        f_offset = tf.concat(f_offset, axis=-1)
        f_offset = tf.reshape(f_offset,
                              (-1, num_channels, height * 3, width * 3))
        f_offset = tf.transpose(f_offset, (0, 2, 3, 1))
        return f_offset
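The index shuffle at the top of call is easy to verify in isolation. With four offset channels laid out as [x1, x2, y1, y2], the gather interleaves them into [x1, y1, x2, y2]:

import tensorflow as tf

channels = 4
ind_shuffle = tf.concat(
    [tf.range(0, channels, 2),
     tf.range(1, channels + 1, 2)], axis=0)
print(ind_shuffle.numpy())  # [0 2 1 3]

t = tf.constant([[[[0., 1., 2., 3.]]]])  # NHWC; channels are [x1, x2, y1, y2]
print(tf.gather(t, ind_shuffle, axis=3).numpy())  # channels become [0. 2. 1. 3.]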
Example 9
import tensorflow as tf


def find_maxima(x):
    # x: batch of 2D maps with shape (batch, rows, cols)
    col_max = tf.reduce_max(x, axis=1)  # max over rows: one value per column
    row_max = tf.reduce_max(x, axis=2)  # max over columns: one value per row

    cols = tf.cast(tf.argmax(col_max, 1), tf.float32)
    rows = tf.cast(tf.argmax(row_max, 1), tf.float32)
    cols = tf.reshape(cols, (-1, 1))
    rows = tf.reshape(rows, (-1, 1))

    # (row, col) coordinates of the global maximum of each map
    maxima = tf.concat([rows, cols], -1)

    return maxima
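A quick eager-mode check with a single 5x7 map whose peak sits at row 2, column 4:

import tensorflow as tf

x = tf.tensor_scatter_nd_update(tf.zeros((1, 5, 7)), [[0, 2, 4]], [1.0])
print(find_maxima(x).numpy())  # [[2. 4.]] -> (row, col) per image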
Example 10
def top_k(scores, I, ratio, top_k_var):
    """
    Returns indices to get the top K values in `scores` segment-wise, with
    segments defined by I. K is not fixed, but it is defined as a ratio of the
    number of elements in each segment.
    :param scores: a rank 1 tensor with scores;
    :param I: a rank 1 tensor with segment IDs;
    :param ratio: float, ratio of elements to keep for each segment;
    :param top_k_var: a tf.Variable without shape validation (e.g.,
    `tf.Variable(0.0, validate_shape=False)`);
    :return: a rank 1 tensor containing the indices to get the top K values of
    each segment in `scores`.
    """
    num_nodes = tf.segment_sum(tf.ones_like(I),
                               I)  # Number of nodes in each graph
    cumsum = tf.cumsum(num_nodes)  # Cumulative number of nodes (A, A+B, A+B+C)
    cumsum_start = cumsum - num_nodes  # Start index of each graph
    n_graphs = tf.shape(num_nodes)[0]  # Number of graphs in batch
    max_n_nodes = tf.reduce_max(num_nodes)  # Order of biggest graph in batch
    batch_n_nodes = tf.shape(I)[0]  # Number of overall nodes in batch
    to_keep = tf.ceil(ratio * tf.cast(num_nodes, tf.float32))
    to_keep = tf.cast(to_keep, tf.int32)  # Nodes to keep in each graph

    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes)

    y_min = tf.reduce_min(scores)
    dense_y = tf.ones((n_graphs * max_n_nodes, ))
    # Fill with y_min - 1 so that filler values never get picked
    dense_y = dense_y * tf.cast(y_min - 1, tf.float32)
    dense_y = tf.assign(
        top_k_var, dense_y, validate_shape=False
    )  # top_k_var is a variable with unknown shape, defined elsewhere
    dense_y = tf.scatter_update(dense_y, index, scores)
    dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes))

    perm = tf.argsort(dense_y, direction='DESCENDING')
    perm = perm + cumsum_start[:, None]
    perm = tf.reshape(perm, (-1, ))

    to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs, ))
    rep_times = tf.reshape(
        tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1),
        (-1, ))
    mask = tf_repeat_1d(to_rep, rep_times)

    perm = tf.boolean_mask(perm, tf.cast(mask, tf.bool))

    return perm
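The dense scatter-and-sort bookkeeping above implements a simple idea; a plain NumPy sketch of the same semantics (hypothetical data, not this function's API) makes it explicit:

import numpy as np

scores = np.array([0.1, 0.9, 0.4, 0.8, 0.2, 0.7, 0.3])
I = np.array([0, 0, 0, 1, 1, 1, 1])  # two graphs with 3 and 4 nodes
ratio = 0.5

keep = []
for g in np.unique(I):
    idx = np.where(I == g)[0]           # nodes of graph g
    k = int(np.ceil(ratio * len(idx)))  # K as a ratio of the graph size
    keep.extend(idx[np.argsort(-scores[idx])[:k]])
print(sorted(keep))  # [1, 2, 3, 5]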
Example 11
    def _tile_images(self, images):
        num_images = self.batch_size // self.gpus
        channels = ktf.split(images, images.shape[3], axis=3)
        del images
        for i, channel in enumerate(channels):
            # Cut each single-channel image into non-overlapping 512x512 tiles
            tiles = ktf.extract_image_patches(channel,
                                              ksizes=[1, 512, 512, 1],
                                              strides=[1, 512, 512, 1],
                                              rates=[1, 1, 1, 1],
                                              padding="VALID")
            num_tiles = tiles.shape[1] * tiles.shape[2]
            # Each patch row is one flattened 512x512 tile
            tiles = ktf.reshape(tiles, [num_tiles * num_images, 512, 512, 1])
            channels[i] = tiles
        return ktf.concat(channels, 3)