Example #1
    def segment_encoded_signal(self, x):
        x1 = tf.reshape(
            x, (self.batch_size, self.signal_length_samples // self.chunk_size,
                self.chunk_size, self.num_filters_in_encoder))
        # Shift by half a chunk to build the second, overlapped segmentation
        x2 = tf.roll(x, shift=-self.chunk_advance, axis=1)
        x2 = tf.reshape(
            x2,
            (self.batch_size, self.signal_length_samples // self.chunk_size,
             self.chunk_size, self.num_filters_in_encoder))
        x2 = x2[:, :-1, :, :]  # Discard last segment with invalid data

        # Interleave the two segmentations so segments appear in time order
        x_concat = tf.concat([x1, x2], axis=1)
        x = x_concat[:, ::self.num_full_chunks, :, :]
        for i in range(1, self.num_full_chunks):
            x = tf.concat([x, x_concat[:, i::self.num_full_chunks, :, :]],
                          axis=1)
        return x
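The method above builds two chunkings of the same encoded signal, half a chunk apart, and interleaves them. A minimal standalone sketch of the same roll-based overlap trick, with made-up sizes standing in for the class attributes:

import tensorflow as tf

signal_length, chunk_size, chunk_advance = 8, 2, 1  # 50% overlap
x = tf.reshape(tf.range(8, dtype=tf.float32), (1, signal_length, 1))
x1 = tf.reshape(x, (1, signal_length // chunk_size, chunk_size, 1))
x2 = tf.roll(x, shift=-chunk_advance, axis=1)
x2 = tf.reshape(x2, (1, signal_length // chunk_size, chunk_size, 1))[:, :-1]
print(x1.shape, x2.shape)  # (1, 4, 2, 1) and (1, 3, 2, 1)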
Example #2
def _bin_weighted(data, grid, weights, method='linear'):

    if weights is None:
        weights = tf.ones_like(data, ztypes.float)

    weights = weights / tf.reduce_sum(weights)

    grid_size = tf.size(grid)
    grid_min = tf.math.reduce_min(grid)
    grid_max = tf.math.reduce_max(grid)
    num_intervals = tf.math.subtract(grid_size, tf.constant(1))
    dx = tf.math.divide(tf.math.subtract(grid_max, grid_min),
                        tf.cast(num_intervals, ztypes.float))

    transformed_data = tf.math.divide(tf.math.subtract(data, grid_min), dx)

    # Compute the integral and fractional part of the data
    # The integral part is used for lookups, the fractional part is used
    # to weight the data
    integral = tf.math.floor(transformed_data)
    fractional = tf.math.subtract(transformed_data, integral)

    if method == 'simple':
        fractional = tf.cast(fractional > 0.5, fractional.dtype) * fractional

    # Compute the weights for left and right side of the linear binning routine
    frac_weights = tf.math.multiply(fractional, weights)
    neg_frac_weights = tf.math.subtract(weights, frac_weights)

    # tf.math.bincount only works with tf.int32 indices. Rolling the result
    # by one moves each fractional weight to the next grid point; points at
    # grid_max have zero fractional part, so nothing meaningful wraps around.
    bincount_left = tf.roll(tf.math.bincount(tf.cast(integral, tf.int32),
                                             weights=frac_weights,
                                             minlength=grid_size,
                                             maxlength=grid_size),
                            shift=1,
                            axis=0)
    bincount_right = tf.math.bincount(tf.cast(integral, tf.int32),
                                      weights=neg_frac_weights,
                                      minlength=grid_size,
                                      maxlength=grid_size)

    bincount = tf.cast(tf.add(bincount_left, bincount_right), ztypes.float)

    return bincount
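A toy check of the linear-binning idea, independent of ztypes and the helper above: on an assumed unit-spaced grid 0..4, each data point splits its weight between its two neighbouring grid points, and total mass is conserved.

import tensorflow as tf

data = tf.constant([0.25, 2.5])            # grid points at 0, 1, 2, 3, 4
integral = tf.math.floor(data)             # [0., 2.]
fractional = data - integral               # [0.25, 0.5]
at_floor = tf.math.bincount(tf.cast(integral, tf.int32),
                            weights=1 - fractional, minlength=5)
at_ceil = tf.roll(tf.math.bincount(tf.cast(integral, tf.int32),
                                   weights=fractional, minlength=5),
                  shift=1, axis=0)
print(at_floor + at_ceil)  # [0.75 0.25 0.5  0.5  0.  ], sums to 2.0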
Example #3
def relative_position_logits(q, R, direction='left'):
    """ For efficient, assumes that R has been flipped along the
    position axis, i.e. R -> tf.reverse(R, [2])"""
    if direction == 'left':
        return left_shift(tf.matmul(q, R, transpose_b=True))
    elif direction == 'right':
        R_flipud = tf.reverse(R, [2])
        return right_shift(tf.matmul(q, R_flipud, transpose_b=True))
    elif direction == 'both':
        R_flipud = tf.reverse(R, [2])
        num_to_roll = R.shape[2] - q.shape[2]
        lower = left_shift(tf.matmul(q, R, transpose_b=True))
        upper = right_shift(tf.matmul(q, R_flipud, transpose_b=True))
        upper = tf.roll(upper, shift=num_to_roll, axis=3)
        mask = create_lookahead_mask(q.shape[2], R.shape[2], lower.dtype)
        return (1 - mask) * lower + (mask) * upper
    else:
        raise ValueError("Choose valid direction.")
Example #4
    def shiftGaugeField(self, gaugeField, cpt, sign):
        gaugeFieldShifted = tf.roll(gaugeField, -sign, cpt)

        pauliMatNum = self.boundaryConditions[cpt]

        if pauliMatNum == 0:
            return gaugeFieldShifted

        latShape = tf.shape(gaugeField)[0:3]
        indices = FieldTools.boundaryIndices(latShape, cpt, sign)

        updates = tf.gather_nd(gaugeFieldShifted, indices)
        updates = FieldTools.pauliMatrix(pauliMatNum) @\
            updates @ FieldTools.pauliMatrix(pauliMatNum)

        gaugeFieldShifted = tf.tensor_scatter_nd_update(
            gaugeFieldShifted, indices, updates)
        return gaugeFieldShifted
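This shift-then-patch pattern (roll the lattice, then scatter-update the entries that wrapped around) recurs in the next example too. A 1-D toy version of the pattern, unrelated to the gauge-field types above:

import tensorflow as tf

field = tf.constant([10., 11., 12., 13.])
shifted = tf.roll(field, shift=-1, axis=0)           # [11., 12., 13., 10.]
patched = tf.tensor_scatter_nd_update(shifted, [[3]], [0.])
print(patched)                                       # [11., 12., 13., 0.]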
Example #5
    def shiftCovDeriv(self, covDeriv, cpt, sign):
        covDerivShifted = tf.roll(covDeriv, -sign, cpt)

        if cpt != 0:
            return covDerivShifted

        indices = FieldTools.boundaryIndices(self.latShape, cpt, sign)

        if sign == -1:
            updates = tf.zeros(tf.concat([tf.shape(indices)[0:-1], [2, 2]], 0),
                               dtype=tf.complex128)
        else:
            updates = tf.gather_nd(covDeriv, indices)

        covDerivShifted = tf.tensor_scatter_nd_update(covDerivShifted, indices,
                                                      updates)

        return covDerivShifted
Example #6
def Weight_Transform(w_vector, k, n):
    w_sliced = tf.slice(w_vector, [0], [k])
    paddings = tf.constant([[0, k]])
    w_pad = tf.pad(w_vector, paddings, "CONSTANT")
    paddings = tf.constant([[0, k + 1]])
    w_sliced_pad = tf.pad(w_sliced, paddings, "CONSTANT")
    w_sliced_pad = K.reverse(w_sliced_pad, axes=0)
    weight = tf.math.add(w_sliced_pad, w_pad)

    paddings = tf.constant([[0, n - 2 * k - 1]])
    weight = tf.pad(weight, paddings, "CONSTANT")
    weight = tf.reshape(weight, [1, n])

    out = weight
    for ii in range(n - 1):
        A = tf.roll(weight, shift=ii + 1, axis=1)
        out = tf.concat([out, A], axis=0)

    return out
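What Weight_Transform builds is essentially a circulant matrix: each row is the previous row rolled by one. The core pattern in isolation, on a hypothetical 4-wide row:

import tensorflow as tf

row = tf.constant([[1., 2., 0., 0.]])
circulant = tf.concat([tf.roll(row, shift=i, axis=1) for i in range(4)],
                      axis=0)
print(circulant)
# [[1. 2. 0. 0.]
#  [0. 1. 2. 0.]
#  [0. 0. 1. 2.]
#  [2. 0. 0. 1.]]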
Example #7
            def _do_update(x_update_diff_norm_sq, x_update,
                           hess_matmul_x_update):  # pylint: disable=missing-docstring
                hessian_column_with_l2 = sparse_or_dense_matvecmul(
                    hessian_unregularized_loss_outer,
                    hessian_unregularized_loss_middle *
                    _sparse_or_dense_matmul_onehot(
                        hessian_unregularized_loss_outer, coord),
                    adjoint_a=True)

                if l2_regularizer is not None:
                    hessian_column_with_l2 += _one_hot_like(
                        hessian_column_with_l2,
                        coord,
                        on_value=2. * l2_regularizer)

                # Move the batch dimensions of `hessian_column_with_l2` to rightmost in
                # order to conform to `hess_matmul_x_update`.
                n = tf.rank(hessian_column_with_l2)
                perm = tf.roll(tf.range(n), shift=1, axis=0)
                hessian_column_with_l2 = tf.transpose(a=hessian_column_with_l2,
                                                      perm=perm)

                # Update the entire batch at `coord` even if `delta` may be 0 at some
                # batch coordinates. In those cases, adding `delta` is a no-op.
                x_update = tf.tensor_scatter_nd_add(x_update, [[coord]],
                                                    [delta])

                with tf.control_dependencies([x_update]):
                    x_update_diff_norm_sq_ = x_update_diff_norm_sq + delta**2
                    hess_matmul_x_update_ = (hess_matmul_x_update +
                                             delta * hessian_column_with_l2)

                    # Hint that loop vars retain the same shape.
                    x_update_diff_norm_sq_.set_shape(
                        x_update_diff_norm_sq_.shape.merge_with(
                            x_update_diff_norm_sq.shape))
                    hess_matmul_x_update_.set_shape(
                        hess_matmul_x_update_.shape.merge_with(
                            hess_matmul_x_update.shape))

                    return [
                        x_update_diff_norm_sq_, x_update, hess_matmul_x_update_
                    ]
Example #8
 def parse_image_features(example):
     data_features = {
         'image': tf.io.FixedLenFeature([], tf.string),
         'height': tf.io.FixedLenFeature([], tf.int64),
         'width': tf.io.FixedLenFeature([], tf.int64),
         'index': tf.io.FixedLenFeature([], tf.int64),
     }
     for lens_param in lens_params:
         data_features[lens_param] = tf.io.FixedLenFeature([], tf.float32)
     parsed_dataset = tf.io.parse_single_example(example, data_features)
     image = tf.io.decode_raw(parsed_dataset['image'], out_type=float)
     image = tf.reshape(
         image, (parsed_dataset['height'], parsed_dataset['width'], 1))
     # Add the noise using the baobab noise function (which is a tf graph)
     if noise_function is not None:
         image = noise_function.add_noise(image)
     # Shift the images if that's specified
     if shift_pixels > 0:
         # Get the x and y shift from a categorical distribution centered at 0
         # and going from -shift_pixels to shift_pixels
         shifts = tf.squeeze(tf.random.categorical(
             tf.math.log([[0.5] * (2 * shift_pixels + 1)]), 2) -
                             shift_pixels,
                             axis=0)
         # Shift the image accordingly
         image = tf.roll(image, shifts, axis=[0, 1])
         # Update the x shifts and y shifts
         for x_param in shift_params[0]:
             # The shift in the column corresponds to x and increasing column
             # corresponds to increasing x.
             parsed_dataset[x_param] += tf.cast(
                 shifts[1], tf.float32) * normed_pixel_scale[x_param]
         for y_param in shift_params[1]:
             # The shift in the row corresponds to y and increasing row
             # corresponds to increasing y.
             parsed_dataset[y_param] += tf.cast(
                 shifts[0], tf.float32) * normed_pixel_scale[y_param]
     # If the images must be normed divide by the std
     if norm_images:
         image = image / tf.math.reduce_std(image)
     lens_param_values = tf.stack(
         [parsed_dataset[lens_param] for lens_param in lens_params])
     return image, lens_param_values
Example #9
 def naive_max_ranking_roll(y_true, y_p):
     y_neg = tf.roll(y_p, shift=1, axis=0)
     vidp, senp = tf.split(y_p, 2, axis=1)
     vidn, senn = tf.split(y_neg, 2, axis=1)
     # It's called "roll" because each sample treats its rolled neighbor as a negative
     vp_sn = cos_similarity(vidp, senn)
     d_vp_sn = tf.linalg.diag_part(vp_sn)
     r_vp_sn = tf.expand_dims(d_vp_sn, axis=1)
     vp_sp = cos_similarity(vidp, senp)
     d_vp_sp = tf.linalg.diag_part(vp_sp)
     r_vp_sp = tf.expand_dims(d_vp_sp, axis=1)
     vn_sp = cos_similarity(vidn, senp)
     d_vn_sp = tf.linalg.diag_part(vn_sp)
     r_vn_sp = tf.expand_dims(d_vn_sp, axis=1)
     # Max ranking loss
     loss = tf.maximum(0.0, margin + r_vp_sn - r_vp_sp) + tf.maximum(
         0.0, margin + r_vn_sp - r_vp_sp)
     loss = tf.reduce_mean(loss) + 1e-12
     return loss
Example #10
def _roll_example(id, train_length, train_examples, test_input, test_output,
                  y_shift, x_shift):
    """ Roll all images by specified y and x shifts """

    train_examples = tf.roll(train_examples, y_shift, axis=2)
    train_examples = tf.roll(train_examples, x_shift, axis=3)

    test_input = tf.roll(test_input, y_shift, axis=1)
    test_input = tf.roll(test_input, x_shift, axis=2)

    test_output = tf.roll(test_output, y_shift, axis=1)
    test_output = tf.roll(test_output, x_shift, axis=2)

    return id, train_length, train_examples, test_input, test_output
Example #11
def create_adversarial_model(rgb_input):

    default_adv_flag = tf.constant(1.0, dtype=tf.float32)
    adv_flag = tf.placeholder_with_default(default_adv_flag,
                                           shape=default_adv_flag.shape)

    eps_rgb = tf.Variable(tf.zeros(shape=[_BASE_PATCH_FRAMES, 1, 1, 3],
                                   dtype=tf.float32),
                          name='eps')

    mask = tf.ones(shape=[_SAMPLE_VIDEO_FRAMES, _IMAGE_SIZE, _IMAGE_SIZE, 3])

    indices = np.linspace(_IND_START, _IND_END, _IND_END - _IND_START + 1)
    mask_indicator = tf.one_hot(indices=indices, depth=_SAMPLE_VIDEO_FRAMES)
    mask_indicator = tf.reduce_sum(mask_indicator, reduction_indices=0)
    mask_indicator = tf.reshape(mask_indicator,
                                [_SAMPLE_VIDEO_FRAMES, 1, 1, 1])
    mask_rgb = tf.convert_to_tensor(mask * mask_indicator,
                                    name='eps_mask')  # same shape as input
    # adversarial_inputs_rgb = tf.nn.tanh(rgb_input + adv_flag * (mask_rgb * eps_rgb),name='adversarial_input')
    random_shift = tf.random_uniform(dtype=tf.int32,
                                     minval=0,
                                     maxval=_SAMPLE_VIDEO_FRAMES,
                                     shape=[])
    cyclic_rgb_input = tf.roll(rgb_input, shift=random_shift, axis=1)

    cyclic_flag_default = tf.constant(0.0, dtype=tf.float32)
    cyclic_flag = tf.placeholder_with_default(cyclic_flag_default,
                                              name='cyclic_flag',
                                              shape=cyclic_flag_default.shape)

    model_input = cyclic_flag * cyclic_rgb_input + (1 -
                                                    cyclic_flag) * rgb_input

    adversarial_inputs_rgb = tf.clip_by_value(model_input + adv_flag *
                                              (mask_rgb * eps_rgb),
                                              clip_value_min=-1.0,
                                              clip_value_max=1.0,
                                              name='adversarial_input')

    # return perturbation, adversarial input, adversarial flag placeholder, cyclic flag placeholder
    return eps_rgb, adversarial_inputs_rgb, adv_flag, cyclic_flag
Example #12
def new3_get_edge_feature(point_cloud, nn_idx, k=20):
    """Construct edge feature for each point
        Args:
          point_cloud: (batch_size, num_points, 1, num_dims)
          nn_idx: (batch_size, num_points, k)
          k: int

        Returns:
          edge features: (batch_size, num_points, k, num_dims)
        """
    og_batch_size = point_cloud.get_shape().as_list()[0]
    point_cloud = tf.squeeze(point_cloud)
    if og_batch_size == 1:
        point_cloud = tf.expand_dims(point_cloud, 0)

    point_cloud_central = point_cloud

    point_cloud_shape = point_cloud.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    num_dims = point_cloud_shape[2].value

    idx_ = tf.range(batch_size) * num_points
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])

    point_cloud_flat = tf.reshape(point_cloud, [-1, num_dims])
    point_cloud_neighbors = tf.gather(point_cloud_flat, nn_idx + idx_)
    point_cloud_central = tf.expand_dims(point_cloud_central, axis=-2)

    point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])
    point_nei_minus_self = point_cloud_neighbors - point_cloud_central
    point_nei_minus_self_2 = tf.multiply(point_nei_minus_self,
                                         point_nei_minus_self)
    point_nei_minus_self_roll = tf.roll(point_nei_minus_self, shift=1, axis=-1)
    point_nei_minus_self_2_roll = tf.multiply(point_nei_minus_self,
                                              point_nei_minus_self_roll)
    edge_feature = tf.concat([
        point_cloud_central, point_nei_minus_self, point_nei_minus_self_2,
        point_nei_minus_self_2_roll
    ],
                             axis=-1)
    return edge_feature
Example #13
def sample_top_p(logits, top_p):
    """Chooses most probable logits with cumulative probabilities upto top_p.

  Sets the remaining logits to negative infinity.

  Args:
    logits: Input logits for next token.
    top_p: Float tensor with a value >=0 and < 1.0

  Returns:
    Logits with top_p filtering applied.
  """
    sorted_indices = tf.argsort(logits, direction="DESCENDING")
    # Flatten logits as tf.gather on TPU needs axis to be compile time constant.
    logits_shape = decoding_module.shape_list(logits)
    range_for_gather = tf.expand_dims(tf.range(0, logits_shape[0]), axis=1)
    range_for_gather = tf.tile(range_for_gather * logits_shape[1],
                               [1, logits_shape[1]]) + sorted_indices
    flattened_logits = tf.reshape(logits, [-1])
    flattened_sorted_indices = tf.reshape(range_for_gather, [-1])
    sorted_logits = tf.reshape(
        tf.gather(flattened_logits, flattened_sorted_indices),
        [logits_shape[0], logits_shape[1]])
    cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1),
                                 axis=-1)

    # Remove tokens with cumulative probability above the threshold.
    sorted_indices_to_remove = cumulative_probs > top_p

    # Shift the indices to the right to keep the first token above threshold.
    sorted_indices_to_remove = tf.roll(sorted_indices_to_remove, 1, axis=-1)
    sorted_indices_to_remove = tf.concat([
        tf.zeros_like(sorted_indices_to_remove[:, :1]),
        sorted_indices_to_remove[:, 1:]
    ], -1)

    # Scatter sorted indices to original indexes.
    indices_to_remove = scatter_values_on_batch_indices(
        sorted_indices_to_remove, sorted_indices)
    top_p_logits = set_tensor_by_indices_to_value(logits, indices_to_remove,
                                                  np.NINF)
    return top_p_logits
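The roll-and-mask step above is the heart of top-p filtering: shifting the removal mask right by one keeps the first token that crosses the threshold. A standalone sketch on an already-sorted toy distribution, so the gather/scatter plumbing above is unnecessary:

import tensorflow as tf

probs = tf.constant([[0.4, 0.3, 0.2, 0.1]])          # sorted descending
cumulative = tf.cumsum(probs, axis=-1)               # [0.4 0.7 0.9 1.0]
remove = cumulative > 0.6
remove = tf.roll(remove, 1, axis=-1)
remove = tf.concat([tf.zeros_like(remove[:, :1]), remove[:, 1:]], -1)
print(remove)  # [[False False  True  True]]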
Example #14
def shift_zeros(data, mask, axis=-2):
    zeros = tf.zeros_like(data)

    data_flat = tf.boolean_mask(data, mask)
    nonzero_lens = tf.reduce_sum(tf.cast(mask, dtype=tf.int32), axis=-2)
    nonzero_mask = tf.sequence_mask(nonzero_lens, maxlen=tf.shape(mask)[-2])
    perm1 = tf.range(0, tf.shape(tf.shape(data))[0] - 2)
    perm2 = tf.roll(tf.range(
        tf.shape(tf.shape(data))[0] - 2,
        tf.shape(tf.shape(data))[0]),
                    1,
                    axis=-1)

    perm = tf.concat([perm1, perm2], axis=-1)
    nonzero_mask = tf.transpose(nonzero_mask, perm=perm)
    inds = tf.cast(tf.where(nonzero_mask), dtype=tf.int32)
    nonzero_data = tf.tensor_scatter_nd_update(zeros, inds, data_flat)

    return nonzero_data
Example #15
def center_image(image, pad=0, only_even_shifts=False):
    image = pad_image(image, pad)

    spatial_dims = get_spatial_dims()

    com = center_of_mass(image)

    shift = tf.squeeze(com, axis=-1)
    shift = tf.cast(shift, tf.int32)

    # theory: only shifting by multiples of 2 may help avoid artifacts due to sensor debayering
    if only_even_shifts:
        shift = tf.bitwise.bitwise_and(shift, -2)

    shift = shift * -1
    #print("shift:", shift)
    shift_axis = spatial_dims
    image = tf.roll(image, shift=shift, axis=shift_axis)

    return image, shift, shift_axis
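The bitwise_and with -2 above is a two's-complement trick: clearing the lowest bit rounds any integer down to the nearest even value. In isolation:

import tensorflow as tf

shifts = tf.constant([3, -3, 4])
print(tf.bitwise.bitwise_and(shifts, -2))  # [ 2 -4  4]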
Example #16
def get_pca_xy_angle(positions, rotation_axis=2):
    from sklearn.decomposition import PCA

    def get_pca(xy):
        pca = PCA(n_components=1)
        xy = xy.numpy()
        pca.fit_transform(xy)
        pca_vec = tf.squeeze(pca.components_, axis=0)
        return pca_vec

    shift = 2 - rotation_axis
    if shift != 0:
        positions = tf.roll(positions, shift, axis=-1)

    xy, _ = tf.split(positions, [2, 1], axis=-1)
    pca_vec = tf.py_function(get_pca, [xy], positions.dtype)
    pca_vec.set_shape((2, ))
    x, y = tf.unstack(pca_vec, axis=0)
    angle = tf.atan2(y, x)
    return angle
Example #17
    def wrap(inputs):
        logits, output_len = inputs
        outputs = {
            'blank_last_logits': logits,
            'out_len': output_len,
            'logits': tf.roll(logits, shift=1, axis=-1),
        }
        outputs['blank_last_softmax'] = tf.nn.softmax(
            outputs['blank_last_logits'], axis=-1)
        outputs['softmax'] = tf.nn.softmax(outputs['logits'])

        greedy_decoded = \
            ctc_ops.ctc_greedy_decoder(inputs=tf.transpose(outputs['blank_last_logits'], perm=[1, 0, 2]),
                                       sequence_length=tf.cast(K.flatten(outputs['out_len']),
                                                               'int32'))[0][0]
        greedy_decoded = tf.cast(greedy_decoded, 'int32', 'greedy_int32')
        outputs['decoded'] = tf.sparse.to_dense(
            greedy_decoded,
            default_value=tf.constant(-1, dtype=greedy_decoded.dtype)) + 1
        return outputs
Example #18
  def test_spline_pano_spline_roll(self):
    # random image
    random_state = np.random.RandomState(seed=0)
    warp_input = random_state.uniform(size=[1, 40, 40, 3])

    with tf.name_scope("control_point_warp"):
      # Testing an x-shift of 0.1 which is 10% of the image.
      # This is equivalent to a horizontal roll when panorama is True.

      tf_warp_input = tf.constant(warp_input, dtype=tf.float32)

      x_control_points = tf.ones([1, 10, 10, 1]) * 0.1
      y_control_points = tf.zeros([1, 10, 10, 1])

      control_points = tf.concat([y_control_points, x_control_points], axis=-1)
      warped = image_alignment.bspline_warp(
          control_points, tf_warp_input, 2, pano_pad=True)
      roll10_tf_warp_input = tf.roll(tf_warp_input, -4, axis=2)
    with self.session():
      self.assertAllClose(roll10_tf_warp_input.eval(), warped.eval())
Example #19
    def make_model_spec_psf():
      input_layer = tf.placeholder(tf.float32, shape=input_shape)
      psf_layer = tf.placeholder(tf.float32, shape=psf_shape)
      x = self.embed(tf.expand_dims(input_layer, -1))

      # If we have access to the PSF, we add this information to the encoder
      if hparams.encode_psf and 'psf' in features:
        psf_image = tf.expand_dims(tf.signal.irfft2d(tf.cast(psf_layer[...,0], tf.complex64)), axis=-1)
        # Roll the image to undo the fftshift, assuming x1 zero padding and x2 subsampling
        psf_image = tf.roll(psf_image, shift=[input_shape[1], input_shape[2]], axis=[1,2])
        psf_image = tf.image.resize_with_crop_or_pad(psf_image, input_shape[1], input_shape[2])
        net_psf = tf.layers.conv2d(psf_image,
                                   hparams.hidden_size // 4, 5,
                                   padding='same', name="psf_embed_1")
        net_psf = common_layers.layer_norm(net_psf, name="psf_norm")
        x, encoder_layers = self.encoder(tf.concat([x, net_psf], axis=-1))
      else:
        x, encoder_layers = self.encoder(x)
      b, b_loss = self.bottleneck(x)
      hub.add_signature(inputs={'input':input_layer, 'psf':psf_layer}, outputs=b)
Example #20
    def create_tf_roll_net(self, shift, axis, x_shape, input_type, ir_version):
        tf.compat.v1.reset_default_graph()

        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            tf_x_shape = x_shape.copy()
            # reshaping
            if len(tf_x_shape) >= 3:
                tf_x_shape.append(tf_x_shape.pop(1))

            x = tf.compat.v1.placeholder(input_type, tf_x_shape, 'Input')
            roll = tf.roll(x, shift=shift, axis=axis)

            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        # TODO: add reference IR net. Now it is omitted and tests only inference result that is more important
        ref_net = None

        return tf_net, ref_net
Example #21
def expected_gradients(inputs, labels, model, index_true_class=True):
    '''
    Given a batch of inputs and labels, and a model,
    symbolically computes a single sample of expected gradients.

    Args:
        inputs: A [batch_size, ...]-shaped tensor. The input to a model.
        labels: A [batch_size, num_classes]-shaped tensor.
                The true class labels in one-hot encoding form,
                assuming a multi-class problem.
        model:  A tf.keras.Model object, or a subclass object thereof.
        index_true_class: Whether or not to take the gradients of the output with respect to the true
            class. True by default. This should be set to True in the multi-class setting, and False
            in the regression setting.
    Returns:
        A tensor the same shape as the input representing a single sample of expected gradients
        of the output of the model with respect to the input.
    '''
    current_batch_size = tf.shape(inputs)[0]

    #Here we have to compute the interpolated input into the model
    references = tf.roll(inputs, shift=1, axis=0)
    alphas = tf.random.uniform(shape=(current_batch_size, 1, 1, 1),
                               minval=0.0,
                               maxval=1.0,
                               dtype=tf.float32)
    interpolated_inputs = alphas * inputs + (1.0 - alphas) * references

    with tf.GradientTape() as tape:
        tape.watch(interpolated_inputs)
        predictions = model(interpolated_inputs, training=True)

        if index_true_class:
            predictions_indexed = _index_predictions(predictions, labels)
        else:
            predictions_indexed = predictions

    input_gradients = tape.gradient(predictions_indexed, interpolated_inputs)
    difference_from_reference = inputs - references
    expected_gradients = input_gradients * difference_from_reference
    return expected_gradients
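A hedged usage sketch for the regression path (index_true_class=False, so labels and the _index_predictions helper are never touched); the model and input shapes are invented for illustration:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Flatten(),
                             tf.keras.layers.Dense(1)])
inputs = tf.random.normal([4, 8, 8, 1])
eg = expected_gradients(inputs, labels=None, model=model,
                        index_true_class=False)
print(eg.shape)  # (4, 8, 8, 1), same shape as the input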
Example #22
def apply_demosaic_filter(bayer_filtered_image, demosaic_kernels):
    bayer_height = demosaic_kernels.shape[-6]
    bayer_width = demosaic_kernels.shape[-5]
    kernel_height = demosaic_kernels.shape[-4].value
    kernel_width = demosaic_kernels.shape[-3].value

    col_subimages = []
    for tile_y in range(0, bayer_height):
        row_subimages = []
        for tile_x in range(0, bayer_width):
            demosaic_kernel_for_tile_pos = demosaic_kernels[..., tile_y,
                                                            tile_x, :, :, :, :]

            # shift the image, so the strides hit the appropriate pixels, all from the correct part of the tile pattern
            # also needing to shift by the center of the kernel seems like a tf bug...
            shifted_image = tf.roll(bayer_filtered_image,
                                    shift=(-tile_y + (kernel_height // 2),
                                           -tile_x + (kernel_width // 2)),
                                    axis=(-3, -2))

            subimage_for_tile_pos = tf.nn.conv2d(shifted_image,
                                                 demosaic_kernel_for_tile_pos,
                                                 strides=(bayer_height,
                                                          bayer_width),
                                                 padding='SAME')

            # and now for a bunch of voodoo magic with indices to recombine this monstrosity...
            row_subimages.append(subimage_for_tile_pos)
        row_subimages = tf.stack(row_subimages, axis=-2)
        row_subimages = tf.reshape(
            row_subimages,
            shape=(bayer_filtered_image.shape[-4],
                   bayer_filtered_image.shape[-3] // bayer_height,
                   bayer_filtered_image.shape[-2], demosaic_kernels.shape[-1]))
        col_subimages.append(row_subimages)
    col_subimages = tf.stack(col_subimages, axis=-3)
    col_subimages = tf.reshape(
        col_subimages,
        shape=(bayer_filtered_image.shape[-4], bayer_filtered_image.shape[-3],
               bayer_filtered_image.shape[-2], demosaic_kernels.shape[-1]))
    return col_subimages
Example #23
def lossFunction(N_r, dN_r, ddN_r, lossfunction, stepsize, omega):
    ''' Computes the loss of the NN; returns loss, show_loss, error.

        Can compute linloss as well as pidloss, as specified by lossfunction.

        stepsize is only relevant for pidloss and refers to the time intervals in t_batch.
        '''

    # differential equation
    error = tf.multiply(tf.sin(N_r), omega**2) + ddN_r

    # linear loss
    if lossfunction == 'linloss':

        loss = tf.reduce_mean(tf.square(error))

    # proportional, integral, differential loss
    if lossfunction == 'pidloss':

        # pid constants
        a = 1
        b = 1
        c = 1

        # proportional part
        P = tf.reduce_mean(tf.square(error))

        # integral part
        I = tf.reduce_mean(tf.square(tf.cumsum(error, axis=1))) / stepsize

        # differential part
        error_prev = tf.roll(error, 1, axis=1)[:, 1:, :]
        e_rolled = tf.concat(
            [error_prev, tf.ones((tf.shape(error)[0], 1, tf.shape(error)[2]))],
            axis=1)
        D = tf.reduce_mean(tf.square(error - e_rolled)) / stepsize

        loss = a * P + b * I + c * D

    show_loss = tf.reduce_mean(tf.square(error))
    return loss, show_loss, error
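The D term above approximates a derivative by differencing the error with a rolled copy of itself. The same finite-difference-by-roll move on a toy tensor:

import tensorflow as tf

e = tf.constant([[1.], [2.], [4.], [7.]])[None]      # shape (1, 4, 1)
e_prev = tf.roll(e, 1, axis=1)                       # wraps e[-1] to front
diff = (e - e_prev)[:, 1:, :]                        # drop the wrapped entry
print(tf.squeeze(diff))                              # [1. 2. 3.]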
Example #24
 def get_loss_and_grads_with_tiling(self, inputs, tile_size=512,
                                    total_variation_weight=0.004):
     shift_r, shift_d, tiled_image = self.random_image_tiling(
         inputs[0], tile_size)
     grads = tf.zeros_like(tiled_image)
     x_range = tf.range(0, tiled_image.shape[0], tile_size)[:-1]
     if not tf.cast(len(x_range), bool):
         x_range = tf.constant([0])
     y_range = tf.range(0, tiled_image.shape[1], tile_size)[:-1]
     if not tf.cast(len(y_range), bool):
         y_range = tf.constant([0])
     for x in x_range:
         for y in y_range:
             with tf.GradientTape() as tape:
                 tape.watch(tiled_image)
                 image_tile = tf.expand_dims(
                     tiled_image[x:x + tile_size, y:y + tile_size], axis=0)
                 activations = self.model_output(self.dream_model, image_tile)
                 loss = self.get_loss(activations)
                 loss = loss + total_variation_weight * tf.image.total_variation(image_tile)
             grads = grads + tape.gradient(loss, tiled_image)
     grads = tf.roll(grads, shift=[-shift_r, -shift_d], axis=[1, 0])
     grads /= tf.math.reduce_std(grads) + 1e-8
     return loss, grads
Example #25
    def forward(self, data, state):
        """ Forward method to perform mixup batch augmentation

        Args:
            data: Batch data to be augmented
            state: Information about the current execution context.

        Returns:
            Mixed-up batch data
        """
        iterdata = data if isinstance(
            data, list) else list(data) if isinstance(data, tuple) else [data]
        lam = self.beta.sample()
        # Could do random mix-up using tf.gather() on a shuffled index list, but batches are already randomly ordered,
        # so just need to roll by 1 to get a random combination of inputs. This also allows MixUpLoss to easily compute
        # the corresponding Y values
        mix = [
            lam * dat + (1.0 - lam) * tf.roll(dat, shift=1, axis=0)
            for dat in iterdata
        ]
        return mix + [lam]
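The core of the op in isolation: rolling the batch by one pairs every sample with its neighbour, so a single Beta draw mixes the whole batch (lam hard-coded here instead of sampled from self.beta):

import tensorflow as tf

batch = tf.reshape(tf.range(6, dtype=tf.float32), (3, 2))
lam = 0.7
mixed = lam * batch + (1.0 - lam) * tf.roll(batch, shift=1, axis=0)
print(mixed)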
Example #26
def getPointCoord(pointTensor):
    """
    from the tensor layer holding one filter(image), get the 2D coordinate of the point in the image
    """

    # flatten the tensor
    featureFlatten = tf.reshape(pointTensor, [-1])

    # get the max argument of the flatten array
    featureFlattenArgMax = tf.math.argmax(featureFlatten)

    # get the coordinates of that argument in tensor of the shape as maskImageTensor
    coords = tf.unravel_index(indices=tf.cast(featureFlattenArgMax,
                                              dtype=tf.int32),
                              dims=tf.cast(tf.shape(pointTensor),
                                           dtype=tf.int32))
    coords = tf.roll(
        coords, shift=1,
        axis=0)  # roll coords to get the right width height format

    return coords.numpy()
Example #27
    def _linear_binning(self, weights=None):

        if weights is None:
            weights = tf.ones_like(self._data)

        grid_min = tf.math.reduce_min(self._grid)
        grid_max = tf.math.reduce_max(self._grid)
        num_intervals = tf.math.subtract(tf.size(self._grid), tf.constant(1))
        dx = tf.math.divide(tf.math.subtract(grid_max, grid_min),
                            tf.cast(num_intervals, tf.float32))

        transformed_data = tf.math.divide(
            tf.math.subtract(self._data, grid_min), dx)

        # Compute the integral and fractional part of the data
        # The integral part is used for lookups, the fractional part is used
        # to weight the data
        integral = tf.math.floor(transformed_data)
        fractional = tf.math.subtract(transformed_data, integral)

        # Compute the weights for left and right side of the linear binning routine
        frac_weights = tf.math.multiply(fractional, weights)
        neg_frac_weights = tf.math.subtract(weights, frac_weights)

        # If the data is not a subset of the grid, the integral values will be
        # outside of the grid. To solve the problem, we filter these values away
        #unique_integrals = tf.unique(integral)
        #unique_integrals = unique_integrals[(unique_integrals >= 0) & (unique_integrals <= len(grid_points))]

        bincount_left = tf.roll(tf.math.bincount(tf.cast(integral, tf.int32),
                                                 weights=frac_weights),
                                shift=1,
                                axis=0)
        bincount_right = tf.math.bincount(tf.cast(integral, tf.int32),
                                          weights=neg_frac_weights)

        bincount = tf.add(bincount_left, bincount_right)

        return bincount
Example #28
        def flatten_fn(sample):
            seq_len = tf.shape(sample.data.observation)[0]
            arange = tf.range(seq_len)
            is_future_mask = tf.cast(arange[:, None] < arange[None],
                                     tf.float32)
            discount = self._config.discount**tf.cast(arange[None] - arange[:, None], tf.float32)  # pylint: disable=line-too-long
            probs = is_future_mask * discount
            # The indexing changes the shape from [seq_len, 1] to [seq_len]
            goal_index = tf.random.categorical(logits=tf.math.log(probs),
                                               num_samples=1)[:, 0]
            state = sample.data.observation[:-1, :self._config.obs_dim]
            next_state = sample.data.observation[1:, :self._config.obs_dim]

            # Create the goal observations in three steps.
            # 1. Take all future states (not future goals).
            # 2. Apply obs_to_goal.
            # 3. Sample one of the future states. Note that we don't look for a goal
            # for the final state, because there are no future states.
            goal = sample.data.observation[:, :self._config.obs_dim]
            goal = contrastive_utils.obs_to_goal_2d(
                goal,
                start_index=self._config.start_index,
                end_index=self._config.end_index)
            goal = tf.gather(goal, goal_index[:-1])
            new_obs = tf.concat([state, goal], axis=1)
            new_next_obs = tf.concat([next_state, goal], axis=1)
            transition = types.Transition(observation=new_obs,
                                          action=sample.data.action[:-1],
                                          reward=sample.data.reward[:-1],
                                          discount=sample.data.discount[:-1],
                                          next_observation=new_next_obs,
                                          extras={
                                              'next_action':
                                              sample.data.action[1:],
                                          })
            # Shift for the transpose_shuffle.
            shift = tf.random.uniform((), 0, seq_len, tf.int32)
            transition = tree.map_structure(
                lambda t: tf.roll(t, shift, axis=0), transition)
            return transition
Example #29
    def call(self, inputs):

        # input shape [tot_utt, embed_dim]
        inputs = tf.keras.backend.permute_dimensions(inputs, [1, 0])  # [embed_dim, tot_utt]
        # centroid_column
        self_block = tf.keras.backend.ones(shape=[self._num_utterance, self._num_utterance], dtype=tf.float32) - tf.keras.backend.eye(self._num_utterance, dtype=tf.float32)
        self_block = self_block / (self._num_utterance - 1)
        # [num_spkr_utt, num_spkr_utt]
        centroid_block = tf.pad(self_block, [[0, 0], [0, (self._num_speakers - 1) * self._num_utterance]], name="normal_centroid_select_pad", constant_values=1/self._num_utterance)
        # [num_spkr_utt * num_spkr, num_spkr_utt]
        centroid_per_spkr = tf.pad(centroid_block, [[0, (self._num_speakers - 1) * self._num_utterance], [0, 0]], name="other_utterances_zero", constant_values=0)
        # [tot_utt, tot_utt]
        #  ex) for spkr1
        # [[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,   ...   ]    {
        #  [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,   ...   ]       ~
        #  [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1,   ...   ]
        #  [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1,   ...   ]
        #  [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1,   ...   ]
        #  [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1,   ...   ]      linear combination matrix selecting the 10 same-speaker utterances (excluding the utterance itself)
        #  [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1,   ...   ]
        #  [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,   ...   ]
        #  [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1,   ...   ]       ~
        #  [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1,   ...   ]        }
        #  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   ...   ]
        #  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   ...   ]
        #  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   ...   ]
        #  [             ...                   ...   ]
        #  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,   ...   ]
        # ]

        # [tot_utt, tot_utt]
        centroid_per_spkr_list = [tf.roll(centroid_per_spkr, axis=0, shift=spk_idx * self._num_utterance) for spk_idx in range(self._num_speakers)]
        # num_spkr * [tot_utt, tot_utt]
        centroid_list = tf.keras.backend.stack(centroid_per_spkr_list, axis=-1)
        # [tot_utt, tot_utt, num_spkr]

        self_exclusive_centroids = tf.keras.backend.dot(inputs, centroid_list)
        # [embed_dim, tot_utt] * [tot_utt, tot_utt, num_spkr]
        # ---> [embed_dim, tot_utt, num_spkr]
        return self_exclusive_centroids
Example #30
    def shuffle(self, z_a, z_p):
        '''
        Concatenate z_a and z_p (as self.concat does), but shuffle z_p so the paired features don't come from the same image.
        input:
            - z_p: 16 x 16 x 1024
            - z_a: 16 x 16 x 1024
        output: 16 x 16 x 2048
        '''
        # shuffled_z_p = Lambda(lambda x: tf.random.shuffle(x))(z_p)  # not differentiable

        # indexes = list(range(self.batch_size))
        # random.shuffle(indexes)
        # mixed_z_p = z_p[indexes,:,:,:]
        # concat = concatenate([z_a, mixed_z_p])

        shuffled_z_p = Lambda(lambda x: tf.roll(x, shift=1, axis=0))(z_p)
        concat = concatenate([z_a, shuffled_z_p])
        sums_ori = tf.math.reduce_sum(z_p, axis=(1, 2, 3))
        sums_aft = tf.math.reduce_sum(shuffled_z_p, axis=(1, 2, 3))
        print("z_p sums %s" % str(sums_ori))
        print("mixed z_p sums %s" % str(sums_aft))
        return concat
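The two print statements above check that the roll is a pure permutation of the batch. The same sanity check as a standalone snippet, with an invented feature shape:

import tensorflow as tf

z_p = tf.random.normal([4, 2, 2, 3])
rolled = tf.roll(z_p, shift=1, axis=0)
print(tf.math.reduce_sum(z_p, axis=(1, 2, 3)))
print(tf.math.reduce_sum(rolled, axis=(1, 2, 3)))  # same sums, shifted by one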