Example #1
# Method of a Keras-style layer; assumes `import tensorflow as tf` and
# `tfk = tf.keras` in the enclosing module.
def _apply_divergence_concrete(self, scale_factor, name):
    def divergence_fn(p_post, p_prior):
        # KL-style divergence between posterior and prior drop probabilities.
        # Note: as in the original, log(p_prior) appears in both the p_post
        # and the (1 - p_post) branches.
        kl = (p_post * (tf.math.log(p_post + tfk.backend.epsilon())
                        - tf.math.log(p_prior))
              + (1.0 - p_post) * (tf.math.log(1.0 - p_post + tfk.backend.epsilon())
                                  - tf.math.log(p_prior)))
        return tf.reduce_sum(kl) / tf.cast(scale_factor, dtype=tf.float32)

    divergence = tf.identity(divergence_fn(self.p_post, self.p_prior), name=name)
    self.add_loss(divergence)
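For reference, a minimal standalone sketch of the same divergence computation, assuming TF 2.x eager mode and tfk = tf.keras (the probability and scale values below are illustrative only):

import tensorflow as tf
tfk = tf.keras

p_post = tf.constant(0.3)
p_prior = tf.constant(0.5)
scale_factor = 100
eps = tfk.backend.epsilon()
kl = (p_post * (tf.math.log(p_post + eps) - tf.math.log(p_prior))
      + (1.0 - p_post) * (tf.math.log(1.0 - p_post + eps) - tf.math.log(p_prior)))
divergence = tf.reduce_sum(kl) / tf.cast(scale_factor, tf.float32)
print(float(divergence))  # a small positive number for these values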
Example #2
import tensorflow as tf


def exact_laplacian_kernel(x, y, stddev):
    r"""Computes exact Laplacian kernel value(s) for tensors x and y using stddev.

  The Laplacian kernel for vectors u, v is defined as follows:
       K(u, v) = exp(-||u-v|| / stddev)
  where the norm is the l1-norm. x, y can be either vectors or matrices. If they
  are vectors, they must have the same dimension. If they are matrices, they
  must have the same number of columns. In the latter case, the method returns
  (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and
  v is a row from y.

  Args:
    x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim].
    y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim].
    stddev: The width of the Laplacian kernel.

  Returns:
    A single kernel value as a tensor of shape (1, 1) if x, y are vectors, or
    a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian
    kernel) for all (u, v) pairs where u, v are rows from x and y respectively.

  Raises:
    ValueError: if the shapes of x, y are not compatible.
  """
    # _align_matrices (defined elsewhere in this module) is expected to
    # broadcast x and y to a common [m, n, dim] shape so that pairwise
    # differences can be taken.
    x_aligned, y_aligned = _align_matrices(x, y)
    diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2)
    return tf.exp(-diff_l1_norm / stddev)
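A quick worked example (TF 2.x eager mode assumed; the manual broadcasting below stands in for _align_matrices, which is defined elsewhere in the module):

import tensorflow as tf

x = tf.constant([[0.0, 1.0], [2.0, 3.0]])  # two row vectors, shape [2, 2]
y = tf.constant([[1.0, 1.0]])              # one row vector, shape [1, 2]
x_aligned = tf.expand_dims(x, 1)           # [2, 1, 2]
y_aligned = tf.expand_dims(y, 0)           # [1, 1, 2]
diff_l1_norm = tf.reduce_sum(tf.abs(x_aligned - y_aligned), 2)  # [[1.], [3.]]
print(tf.exp(-diff_l1_norm / 1.0))  # [[exp(-1)], [exp(-3)]], i.e. K(u, v)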
Example #3
import tensorflow as tf  # TF 1.x: this snippet uses graph-mode APIs
                         # (tf.read_file, tf.Session, tf.image.resize_bilinear)


def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    input_name = "file_reader"
    output_name = "normalized"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(file_reader,
                                           channels=3,
                                           name='png_reader')
    elif file_name.endswith(".gif"):
        image_reader = tf.squeeze(
            tf.image.decode_gif(file_reader, name='gif_reader'))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
    else:
        image_reader = tf.image.decode_jpeg(file_reader,
                                            channels=3,
                                            name='jpeg_reader')
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander,
                                       [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # Run the graph; the context manager ensures the session is closed.
    with tf.Session() as sess:
        result = sess.run(normalized)

    return result
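Illustrative usage under TF 1.x (the image path is hypothetical):

image_tensor = read_tensor_from_image_file("/path/to/image.jpg",
                                           input_height=224,
                                           input_width=224)
print(image_tensor.shape)  # (1, 224, 224, 3)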
Example #4
import tensorflow as tf


def nearby_difference(x):
    """Compute L2 norms for nearby entries in a batch."""
    # This is a very rough measure of diversity.
    with tf.device('cpu'):
        x1 = tf.reshape(x, shape=[int(x.shape[0]), -1])
        x2 = tf.roll(x1, shift=1, axis=0)
        return tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(x1, x2))))
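For instance (TF 2.x eager mode assumed):

import tensorflow as tf

x = tf.ones([4, 3])                 # identical rows -> zero diversity
print(float(nearby_difference(x)))  # 0.0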
Example #5
def _flip_boxes_left_right(boxes):
    """Left-right flip the boxes.

  Args:
    boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
           Boxes are in normalized form, meaning their coordinates lie
           in [0, 1].
           Each row is in the form of [ymin, xmin, ymax, xmax].

  Returns:
    Flipped boxes.
  """
    ymin, xmin, ymax, xmax = tf.split(value=boxes,
                                      num_or_size_splits=4,
                                      axis=1)
    flipped_xmin = tf.subtract(1.0, xmax)
    flipped_xmax = tf.subtract(1.0, xmin)
    flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
    return flipped_boxes
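A quick check with a single normalized box (values illustrative):

import tensorflow as tf

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6]])  # [ymin, xmin, ymax, xmax]
print(_flip_boxes_left_right(boxes))         # [[0.1, 0.4, 0.5, 0.8]]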
Example #6
import tensorflow as tf


def cal_longest_subsequence(softmaxed_logits):
    """Extract the longest increasing subsequence (LIS) from the class-index
    sequence encoded by the one-hot rows of `softmaxed_logits`."""
    int_logits = tf.dtypes.cast(tf.round(softmaxed_logits), dtype=tf.int32)
    index_tensor = tf.range(softmaxed_logits.shape[1], dtype=tf.int32)
    t_index = tf.reshape(index_tensor, [softmaxed_logits.shape[1], 1])
    # Collapse each one-hot row to its class index, e.g. [3, 2, 4, 5, ...].
    new_seq = tf.transpose(tf.matmul(int_logits, t_index))[0].numpy().tolist()

    # Patience-style LIS: `subseq` holds the current tails and `indexseq`
    # the positions they came from. `binarySearch` (defined elsewhere) is
    # expected to return the index of the smallest element of `subseq`
    # that is >= the probed value, or -1 if none exists.
    subseq = []
    indexseq = []
    for i in range(len(new_seq)):
        if i == 0:
            subseq.append(new_seq[i])
            indexseq.append(i)
        elif new_seq[i] > subseq[-1]:
            subseq.append(new_seq[i])
            indexseq.append(i)
        elif new_seq[i] < subseq[0]:
            subseq[0] = new_seq[i]
            indexseq[0] = i
        else:
            index = binarySearch(subseq, 0, len(subseq) - 1, new_seq[i])
            if index != -1:
                subseq[index] = new_seq[i]
                indexseq[index] = i

    # Soft score 1 / (1 + (subseq - indexseq)^2); computed but currently
    # unused (an alternative was `return tf.reduce_sum(result)`).
    subseq_tensor = tf.reshape(subseq, [1, -1])
    indexseq_tensor = tf.reshape(indexseq, [1, -1])
    minus_result = tf.square(tf.subtract(subseq_tensor, indexseq_tensor))
    one_tensor = tf.ones([1, len(subseq)], tf.int32)
    result = tf.divide(one_tensor, tf.add(one_tensor, minus_result))

    return subseq
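An illustrative call (eager execution assumed). The `binarySearch` helper is defined elsewhere in the original module; a bisect-based stand-in with the assumed contract is shown here:

import tensorflow as tf
from bisect import bisect_left

def binarySearch(arr, lo, hi, key):
    # Stand-in with the assumed contract: index of the smallest element
    # in arr[lo..hi] that is >= key, or -1 if none exists.
    i = bisect_left(arr, key, lo, hi + 1)
    return i if i <= hi else -1

logits = tf.one_hot([3, 1, 4, 2, 5], depth=6)  # encodes the sequence 3,1,4,2,5
print(cal_longest_subsequence(logits))         # LIS tails: [1, 2, 5]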
Example #7
import logging  # the original module may use absl.logging instead

import tensorflow as tf


def cca_loss(x, y, dim, rcov1, rcov2, eps_eig=1e-12):
    """Create a TF graph to compute the joint dimensionality via CCA.

  This function computes the number of "dimensions" that two datasets (x and y)
  share. It creates a TF graph that connects the two TF nodes (x and y) to the
  eigenvalues computed while finding the two optimum rotations that line up the
  two datasets. We want to maximize this measure (i.e., the negative of the
  loss). This function computes the shared dimensionality for unrotated data;
  use the regular Pearson correlation after the CCA model is built.

  See:
  https://towardsdatascience.com/advanced-keras-constructing-complex-custom-losses-and-metrics-c07ca130a618

  Args:
    x: The first TF data of size n_frames x n_dims_x
    y: The second TF data of size n_frames x n_dims_y
    dim: The desired number of output dimensions
    rcov1: Amount to regularize the x covariance estimate
    rcov2: Amount to regularize the y covariance estimate
    eps_eig: Ignore eigenvalues (and dimensions) below this value.

  Returns:
    TF node that calculates the sum of the eigenvalues.  (Note, this gets
    larger as you get more dimensions in common, so you probably want to negate
    this to turn it into a real loss.)
  """
    logging.info('In cca_loss, data at start are: %s %s %d-D %g %g %g', x, y,
                 dim, rcov1, rcov2, eps_eig)
    # Remove the means.
    m1 = tf.math.reduce_mean(x, axis=0, keepdims=True)
    x = tf.subtract(x, m1)

    m2 = tf.math.reduce_mean(y, axis=0, keepdims=True)
    y = tf.subtract(y, m2)

    batch_norm = tf.cast(tf.shape(x)[0], tf.float32) - 1.0
    d1 = tf.compat.dimension_value(tf.shape(x)[1])  # Get dynamic tensor widths
    d2 = tf.compat.dimension_value(tf.shape(y)[1])
    eye1 = tf.eye(d1, dtype=tf.float32)
    cov_xx = tf.matmul(tf.transpose(x), x) / batch_norm + rcov1 * eye1
    eye2 = tf.eye(d2, dtype=tf.float32)
    cov_yy = tf.matmul(tf.transpose(y), y) / batch_norm + rcov2 * eye2
    cov_xy = tf.matmul(tf.transpose(x), y) / batch_norm

    x_vals, x_vecs = tf.linalg.eigh(cov_xx)
    y_vals, y_vecs = tf.linalg.eigh(cov_yy)

    # For numerical stability.
    idx1 = tf.where(x_vals > eps_eig)[:, 0]
    x_vals = tf.gather(x_vals, idx1)
    x_vecs = tf.gather(x_vecs, idx1, axis=1)

    idx2 = tf.where(y_vals > eps_eig)[:, 0]
    y_vals = tf.gather(y_vals, idx2)
    y_vecs = tf.gather(y_vecs, idx2, axis=1)

    k11 = tf.matmul(
        tf.matmul(x_vecs,
                  tf.linalg.tensor_diag(tf.math.reciprocal(tf.sqrt(x_vals)))),
        tf.transpose(x_vecs))
    k22 = tf.matmul(
        tf.matmul(y_vecs,
                  tf.linalg.tensor_diag(tf.math.reciprocal(tf.sqrt(y_vals)))),
        tf.transpose(y_vecs))
    t = tf.matmul(tf.matmul(k11, cov_xy), k22)

    # Eigenvalues are sorted in increasing order.
    vals, _ = tf.linalg.eigh(tf.matmul(t, tf.transpose(t)))
    # Make sure none of the (small) eigenvalues are negative
    estimated_cca_dims = tf.reduce_sum(
        tf.sqrt(tf.math.maximum(0.0, vals[-dim:])))
    return estimated_cca_dims
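Illustrative usage (TF 2.x eager mode assumed; the data are random, so the score is meaningful only as a demonstration):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(100, 5), dtype=tf.float32)
y = tf.constant(np.random.randn(100, 4), dtype=tf.float32)
score = cca_loss(x, y, dim=3, rcov1=1e-3, rcov2=1e-3)
print(float(score))  # grows as x and y share more linear structure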