Example #1
def resize_images(x, height_factor, width_factor, data_format):
    if data_format == 'channels_first':
        original_shape = K.int_shape(x)
        new_shape = tf.shape(x)[2:]

        new_shape = K.cast(new_shape, 'float64')
        # Scale the spatial dimensions by the (possibly fractional) resize factors.
        new_shape *= tf.constant(np.array([height_factor, width_factor]),
                                 dtype=tf.float64)
        new_shape = K.cast(new_shape, 'int32')

        x = K.permute_dimensions(x, [0, 2, 3, 1])
        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x = K.permute_dimensions(x, [0, 3, 1, 2])
        x.set_shape((None, None, original_shape[2] *
                     height_factor if original_shape[2] is not None else None,
                     original_shape[3] *
                     width_factor if original_shape[3] is not None else None))
        return x
    elif data_format == 'channels_last':
        original_shape = K.int_shape(x)
        new_shape = tf.shape(x)[1:3]

        new_shape = K.cast(new_shape, 'float64')
        # Scale the spatial dimensions by the (possibly fractional) resize factors.
        new_shape *= tf.constant(np.array([height_factor, width_factor]),
                                 dtype=tf.float64)
        new_shape = K.cast(new_shape, 'int32')

        x = tf.image.resize_nearest_neighbor(x, new_shape)
        x.set_shape(
            (None, original_shape[1] *
             height_factor if original_shape[1] is not None else None,
             original_shape[2] *
             width_factor if original_shape[2] is not None else None, None))
        return x
    else:
        raise ValueError('Invalid data_format:', data_format)
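A minimal usage sketch (assuming TF 1.x graph mode, with tf/np/K imported in the surrounding module as the snippet implies): nearest-neighbour 2x upsampling of a channels_last tensor.

from keras import backend as K

x = K.placeholder(shape=(None, 8, 8, 3))
y = resize_images(x, 2, 2, data_format='channels_last')
print(y.shape)  # expected static shape: (?, 16, 16, 3)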
Example #2
def _register_rotation(target_image, src_image, rotation_resolution,
                       rotation_guess, upsample_factor):

    # Candidate rotation angles over [0, 180) degrees at the requested resolution.
    n_angles = tf.cast(tf.round(180. / rotation_resolution), tf.int32)
    theta = tf.linspace(0., 180. - rotation_resolution, n_angles)
    theta = -radians(theta)

    target_shape = tf.shape(target_image)
    target_image = tf.reshape(target_image, target_shape[:3])
    src_shape = tf.shape(src_image)
    src_image = tf.reshape(src_image, src_shape[:3])

    rotation_guess = tf.constant(rotation_guess, tf.float32)
    rotation_resolution = tf.constant(rotation_resolution, tf.float32)

    src_image = radon_transform_fft(src_image, theta)
    target_image = radon_transform_fft(target_image, theta)
    shifts = _upsampled_registration(target_image, src_image, upsample_factor)

    angles = shifts[:, 0] * rotation_resolution
    angles = tf.reshape(angles, [-1, 1])
    angles = check_angles(angles, rotation_guess)
    angles = radians(angles)

    return angles
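The snippet leans on project helpers (radians, radon_transform_fft, check_angles, _upsampled_registration) that are not shown here. Only the degrees-to-radians conversion is simple enough to sketch; the project's own version may differ:

import numpy as np
import tensorflow as tf

def radians(deg):
    # Elementwise degrees -> radians.
    return deg * tf.constant(np.pi / 180., tf.float32)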
Example #3
def binary_focal_loss(gamma=2, alpha=0.25):
    """
    Binary form of focal loss.

    focal_loss(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t)
        where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.
    References:
        https://arxiv.org/pdf/1708.02002.pdf
    Usage:
     model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)], metrics=["accuracy"], optimizer=adam)
    """
    alpha = tf.constant(alpha, dtype=tf.float32)
    gamma = tf.constant(gamma, dtype=tf.float32)

    def binary_focal_loss_fixed(y_true, y_pred):
        """
        y_true shape need be (None,1)
        y_pred need be compute after sigmoid
        """
        y_true = tf.cast(y_true, tf.float32)
        alpha_t = y_true * alpha + (K.ones_like(y_true) - y_true) * (1 - alpha)

        p_t = y_true * y_pred + (K.ones_like(y_true) - y_true) * (
            K.ones_like(y_true) - y_pred) + K.epsilon()
        focal_loss = -alpha_t * K.pow(
            (K.ones_like(y_true) - p_t), gamma) * K.log(p_t)
        return K.mean(focal_loss)

    return binary_focal_loss_fixed
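A usage sketch along the lines of the docstring's Usage note; the tiny model below is purely illustrative.

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(1, activation='sigmoid', input_shape=(10,))])
model.compile(optimizer='adam',
              loss=binary_focal_loss(gamma=2, alpha=0.25),
              metrics=['accuracy'])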
Example #4
File: utils.py Project: liatli/deepposekit
def gaussian_kernel_1d(size, sigma):
    size = tf.constant(size, dtype=tf.float32)
    sigma = tf.constant(sigma, dtype=tf.float32)
    x = tf.range(-(size // 2), (size // 2) + 1, dtype=tf.float32)
    # Gaussian density evaluated at the integer offsets in `x` (not renormalised).
    kernel = 1 / (sigma * tf.sqrt(2 * np.pi))
    kernel *= tf.exp(-0.5 * (x / sigma)**2)
    return tf.expand_dims(kernel, axis=-1)
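A quick check of the kernel values (TF 1.x session assumed; tf/np imported as in the snippet):

import tensorflow as tf

kernel = gaussian_kernel_1d(5, 1.0)  # shape (5, 1)
with tf.Session() as sess:
    print(sess.run(kernel).ravel())
# roughly [0.054 0.242 0.399 0.242 0.054], i.e. a sampled, unnormalised Gaussian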
Example #5
def resize_images(x, height_factor, width_factor, interpolation, data_format):
    """Resizes the images contained in a 4D tensor.
    # Arguments
        x: Tensor or variable to resize.
        height_factor: Positive integer.
        width_factor: Positive integer.
        interpolation: string, "nearest", "bilinear" or "bicubic"
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    # Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
    """
    if interpolation == 'nearest':
        tf_resize = tf.image.resize_nearest_neighbor
    elif interpolation == 'bilinear':
        tf_resize = tf.image.resize_bilinear
    elif interpolation == 'bicubic':
        tf_resize = tf.image.resize_bicubic
    else:
        raise ValueError('Invalid interpolation method:', interpolation)
    if data_format == 'channels_first':
        original_shape = int_shape(x)
        new_shape = tf.shape(x)[2:]
        new_shape *= tf.constant(
            np.array([height_factor, width_factor]).astype('int32'))
        x = permute_dimensions(x, [0, 2, 3, 1])
        x = tf_resize(x, new_shape, align_corners=True)
        x = permute_dimensions(x, [0, 3, 1, 2])
        x.set_shape((None, None, original_shape[2] *
                     height_factor if original_shape[2] is not None else None,
                     original_shape[3] *
                     width_factor if original_shape[3] is not None else None))
        return x
    elif data_format == 'channels_last':
        original_shape = int_shape(x)
        new_shape = tf.shape(x)[1:3]
        new_shape *= tf.constant(
            np.array([height_factor, width_factor]).astype('int32'))
        x = tf_resize(x, new_shape, align_corners=True)
        x.set_shape(
            (None, original_shape[1] *
             height_factor if original_shape[1] is not None else None,
             original_shape[2] *
             width_factor if original_shape[2] is not None else None, None))
        return x
    else:
        raise ValueError('Invalid data_format:', data_format)
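A hypothetical usage sketch for the channels_first branch (TF 1.x graph mode; int_shape and permute_dimensions are assumed to come from the Keras backend, as in the surrounding module):

import tensorflow as tf
from keras.backend import int_shape, permute_dimensions

x = tf.placeholder(tf.float32, shape=(None, 16, 32, 32))  # NCHW feature map
y = resize_images(x, 2, 2, interpolation='bilinear', data_format='channels_first')
print(y.shape)  # expected static shape: (?, 16, 64, 64)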
Example #6
File: layers.py Project: jgraving/leap
def resize_images(x, height_factor, width_factor, interpolation, data_format):
    """Resizes the images contained in a 4D tensor.
    # Arguments
        x: Tensor or variable to resize.
        height_factor: Positive integer.
        width_factor: Positive integer.
        interpolation: string, "nearest", "bilinear" or "bicubic"
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    # Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
    """
    if interpolation == 'nearest':
        tf_resize = tf.image.resize_nearest_neighbor
    elif interpolation == 'bilinear':
        tf_resize = tf.image.resize_bilinear
    elif interpolation == 'bicubic':
        tf_resize = tf.image.resize_bicubic
    else:
        raise ValueError('Invalid interpolation method:', interpolation)
    if data_format == 'channels_first':
        original_shape = int_shape(x)
        new_shape = tf.shape(x)[2:]
        new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        x = permute_dimensions(x, [0, 2, 3, 1])
        x = tf_resize(x, new_shape, align_corners=True)
        x = permute_dimensions(x, [0, 3, 1, 2])
        x.set_shape((None, None, original_shape[2] * height_factor if original_shape[2] is not None else None,
                     original_shape[3] * width_factor if original_shape[3] is not None else None))
        return x
    elif data_format == 'channels_last':
        original_shape = int_shape(x)
        new_shape = tf.shape(x)[1:3]
        new_shape *= tf.constant(np.array([height_factor, width_factor]).astype('int32'))
        x = tf_resize(x, new_shape, align_corners=True)
        x.set_shape((None, original_shape[1] * height_factor if original_shape[1] is not None else None,
                     original_shape[2] * width_factor if original_shape[2] is not None else None, None))
        return x
    else:
        raise ValueError('Invalid data_format:', data_format)
Example #7
def jacobian(y_flat, x):
    # Stack the gradient of each element of the rank-1 tensor `y_flat` with
    # respect to `x`, giving one Jacobian row per output element.
    n = y_flat.shape[0]

    loop_vars = [
        ktf.constant(0, ktf.int32),
        ktf.TensorArray(ktf.float32, size=n),
    ]

    _, jacobian = ktf.while_loop(
        lambda j, _: j < n, lambda j, result:
        (j + 1, result.write(j, ktf.gradients(y_flat[j], x))), loop_vars)

    return jacobian.stack()
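A small sanity-check sketch, assuming a TF 1.x setup in which the snippet above runs as-is (it relies on Tensor.shape[0] behaving like an integer) and ktf being TensorFlow: the Jacobian of y = A x should recover A.

import numpy as np
import tensorflow as ktf

A = ktf.constant(np.arange(6, dtype='float32').reshape(2, 3))
x = ktf.placeholder(ktf.float32, shape=(3,))
y = ktf.reshape(ktf.matmul(A, ktf.reshape(x, (3, 1))), (2,))
J = jacobian(y, x)  # shape (2, 1, 3); each write stores the list returned by ktf.gradients
with ktf.Session() as sess:
    print(sess.run(J, {x: np.zeros(3, 'float32')}).squeeze())  # equals A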
Example #8
def top_k(scores, I, ratio, top_k_var):
    """
    Returns indices to get the top K values in `scores` segment-wise, with
    segments defined by I. K is not fixed, but it is defined as a ratio of the
    number of elements in each segment.
    :param scores: a rank 1 tensor with scores;
    :param I: a rank 1 tensor with segment IDs;
    :param ratio: float, ratio of elements to keep for each segment;
    :param top_k_var: a tf.Variable without shape validation (e.g.,
    `tf.Variable(0.0, validate_shape=False)`);
    :return: a rank 1 tensor containing the indices to get the top K values of
    each segment in `scores`.
    """
    num_nodes = tf.segment_sum(tf.ones_like(I),
                               I)  # Number of nodes in each graph
    cumsum = tf.cumsum(num_nodes)  # Cumulative number of nodes (A, A+B, A+B+C)
    cumsum_start = cumsum - num_nodes  # Start index of each graph
    n_graphs = tf.shape(num_nodes)[0]  # Number of graphs in batch
    max_n_nodes = tf.reduce_max(num_nodes)  # Order of biggest graph in batch
    batch_n_nodes = tf.shape(I)[0]  # Number of overall nodes in batch
    to_keep = tf.ceil(ratio * tf.cast(num_nodes, tf.float32))
    to_keep = tf.cast(to_keep, tf.int32)  # Nodes to keep in each graph

    index = tf.range(batch_n_nodes)
    index = (index - tf.gather(cumsum_start, I)) + (I * max_n_nodes)

    y_min = tf.reduce_min(scores)
    dense_y = tf.ones((n_graphs * max_n_nodes, ))
    dense_y = dense_y * tf.cast(
        y_min - 1, tf.float32
    )  # subtract 1 to ensure that filler values do not get picked
    dense_y = tf.assign(
        top_k_var, dense_y, validate_shape=False
    )  # top_k_var is a variable with unknown shape, defined elsewhere
    dense_y = tf.scatter_update(dense_y, index, scores)
    dense_y = tf.reshape(dense_y, (n_graphs, max_n_nodes))

    perm = tf.argsort(dense_y, direction='DESCENDING')
    perm = perm + cumsum_start[:, None]
    perm = tf.reshape(perm, (-1, ))

    to_rep = tf.tile(tf.constant([1., 0.]), (n_graphs, ))
    rep_times = tf.reshape(
        tf.concat((to_keep[:, None], (max_n_nodes - to_keep)[:, None]), -1),
        (-1, ))
    mask = tf_repeat_1d(to_rep, rep_times)

    perm = tf.boolean_mask(perm, mask)

    return perm
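top_k relies on a tf_repeat_1d helper that is not shown. A minimal TF 1.x sketch of what it presumably does (repeat each element of a rank-1 tensor a given number of times); the project's own implementation may differ:

def tf_repeat_1d(x, repeats):
    # e.g. tf_repeat_1d([1., 0.], [2, 3]) -> [1., 1., 0., 0., 0.]
    mask = tf.sequence_mask(repeats)  # (n, max(repeats)), boolean
    row_ids = tf.range(tf.shape(x)[0])[:, None] * tf.ones_like(mask, tf.int32)
    return tf.gather(x, tf.boolean_mask(row_ids, mask))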
Example #9
def _upsampled_registration(target_image, src_image, upsample_factor):

    upsample_factor = tf.constant(upsample_factor, tf.float32)

    target_shape = tf.shape(target_image)
    target_image = tf.reshape(target_image, target_shape[:3])
    src_shape = tf.shape(src_image)
    src_image = tf.reshape(src_image, src_shape[:3])

    src_freq = fft2d(src_image)
    target_freq = fft2d(target_image)

    shape = tf.reshape(tf.shape(src_freq)[1:3], (1, 2))
    shape = tf.cast(shape, tf.float32)
    shape = tf.tile(shape, (tf.shape(target_freq)[0], 1))
    image_product = src_freq * tf.conj(target_freq)
    # Coarse (pixel-level) cross-correlation via the inverse FFT of the cross-spectrum.
    cross_correlation = tf.spectral.ifft2d(image_product)

    maxima = find_maxima(tf.abs(cross_correlation))
    midpoints = fix(tf.cast(shape, tf.float32) / 2.)

    shifts = maxima
    shifts = tf.where(shifts > midpoints, shifts - shape, shifts)
    shifts = tf.round(shifts * upsample_factor) / upsample_factor

    upsampled_region_size = tf.ceil(upsample_factor * 1.5)
    dftshift = fix(upsampled_region_size / 2.0)
    normalization = tf.cast(tf.size(src_freq[0]), tf.float32)
    normalization *= upsample_factor**2
    sample_region_offset = dftshift - shifts * upsample_factor

    data = tf.conj(image_product)
    upsampled_dft = _upsampled_dft(data, upsampled_region_size,
                                   upsample_factor, sample_region_offset)

    cross_correlation = tf.conj(upsampled_dft)
    cross_correlation /= tf.cast(normalization, tf.complex64)
    cross_correlation = tf.abs(cross_correlation)

    maxima = find_maxima(cross_correlation)
    maxima = maxima - dftshift
    shifts = shifts + maxima / upsample_factor

    return shifts
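Here fix is presumably an analogue of numpy.fix (round toward zero); a minimal sketch under that assumption:

def fix(x):
    # Round toward zero: fix(2.7) -> 2.0, fix(-2.7) -> -2.0.
    return tf.where(x >= 0., tf.floor(x), tf.ceil(x))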
Example #10
def __init__(self, use_mask):
    self.use_mask = ktf.constant(use_mask, dtype=ktf.bool)  # True or False
    super(GlobalAttentionGeneral, self).__init__()