Example #1
def flush_and_chunk_episode(example_strings, class_ids, chunk_sizes):
  """Removes flushed examples from an episode and chunks it.

  This function:

  1) splits the batch of examples into a "flush" chunk and some number of
     additional chunks (as determined by `chunk_sizes`),
  2) throws away the "flush" chunk, and
  3) removes the padded dummy examples from the additional chunks.

  For example, in the context of few-shot learning, where episodes are composed
  of a support set and a query set, `chunk_sizes = (150, 100, 50)` would be
  interpreted as describing a "flush" chunk of size 150, a "support" chunk of
  size 100, and a "query" chunk of size 50.

  Args:
    example_strings: 1-D Tensor of dtype tf.string, serialized
      tf.train.Example protocol buffers.
    class_ids: 1-D Tensor of dtype int, class IDs (absolute wrt the original
      dataset).
    chunk_sizes: Tuple of ints representing the sizes of the flush and
      additional chunks.

  Returns:
    A tuple of episode chunks of the form `((chunk_0_example_strings,
    chunk_0_class_ids), (chunk_1_example_strings, chunk_1_class_ids), ...)`.
  """
  # Split into flush + additional chunks, then drop the flush chunk.
  example_strings_chunks = tf.split(example_strings,
                                    num_or_size_splits=chunk_sizes)[1:]
  class_ids_chunks = tf.split(class_ids, num_or_size_splits=chunk_sizes)[1:]

  # Strip the padded dummy examples from each remaining chunk.
  return tuple(
      filter_dummy_examples(strings, ids)
      for strings, ids in zip(example_strings_chunks, class_ids_chunks))
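A toy call, assuming `filter_dummy_examples` (defined elsewhere in the same codebase) drops the padded entries:

import tensorflow as tf

# Hypothetical episode of 6 examples: flush chunk of 2, support 2, query 2.
example_strings = tf.constant(['e0', 'e1', 'e2', 'e3', 'e4', 'e5'])
class_ids = tf.constant([3, 1, 3, 1, 3, 1])
(support, query) = flush_and_chunk_episode(
    example_strings, class_ids, chunk_sizes=(2, 2, 2))
support_strings, support_ids = support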
Example #2
def _shift_and_log_scale_fn(x, output_dims, *args, **kwargs):
  del args
  del kwargs
  if output_dims != self.num_dims - num_masked:
    raise ValueError('Expected {} output_dims, got {}.'.format(
        self.num_dims - num_masked, output_dims))
  # The model emits 2 * output_dims features; split into shift and log-scale.
  return tf.split(shift_and_log_scale_model(x), 2, axis=-1)
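This callable has the signature `tfp.bijectors.RealNVP` expects for its `shift_and_log_scale_fn`; a minimal wiring sketch, assuming `num_masked`, the enclosing object's `self.num_dims`, and a `shift_and_log_scale_model` producing `2 * output_dims` features are in scope:

import tensorflow_probability as tfp

bijector = tfp.bijectors.RealNVP(
    num_masked=num_masked,
    shift_and_log_scale_fn=_shift_and_log_scale_fn)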
Example #3
def fit_gaussian(embeddings, damping=1e-7, full_covariance=False):
  """Fits a unimodal Gaussian distribution to `embeddings`.

  Args:
    embeddings: A [batch_size, embedding_dim] tf.Tensor of embeddings.
    damping: The scale of the covariance damping coefficient.
    full_covariance: Whether to use a full or diagonal covariance.

  Returns:
    Parameter estimates (means and log variances) for a Gaussian model.
  """
  if full_covariance:
    # Recover scalar batch size and embedding dim from the shape tensor.
    num, dim = tf.split(tf.shape(input=embeddings), num_or_size_splits=2)
    num, dim = tf.squeeze(num), tf.squeeze(dim)
    sample_mean = tf.reduce_mean(input_tensor=embeddings, axis=0)
    centered_embeddings = embeddings - sample_mean
    sample_covariance = tf.einsum('ij,ik->kj', centered_embeddings,
                                  centered_embeddings)  # Outer product.
    sample_covariance += damping * tf.eye(dim)  # Positive definiteness.
    sample_covariance /= tf.cast(num, dtype=tf.float32)  # Scale by N.
    return sample_mean, sample_covariance
  else:
    sample_mean, sample_variances = tf.nn.moments(x=embeddings, axes=[0])
    log_variances = tf.math.log(sample_variances +
                                damping * tf.ones_like(sample_variances))
    return sample_mean, log_variances
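An illustrative call on random embeddings (shapes only, values arbitrary):

import tensorflow as tf

embeddings = tf.random.normal([32, 8])
mean, log_variances = fit_gaussian(embeddings)                     # diagonal
mean, covariance = fit_gaussian(embeddings, full_covariance=True)  # full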
Example #4
def get_z(h, latent_shape, name="latent", dist="normal"):
    # Project to twice the latent size, then split into mean and pre-scale.
    h = tf.layers.dense(h, latent_shape * 2, name=name, activation=None,
                        reuse=tf.AUTO_REUSE)
    mu, log_sigma = tf.split(h, 2, axis=-1)
    # Softplus keeps the scale positive; the offset bounds it away from zero.
    sigma = 0.1 + 0.9 * tf.nn.softplus(log_sigma)
    if dist == "multi":
        return tf.contrib.distributions.MultivariateNormalDiag(mu, sigma)
    else:
        return tf.distributions.Normal(mu, sigma)
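A hypothetical TF1-style call, assuming `h` is a [batch, features] encoder activation:

h = tf.placeholder(tf.float32, [None, 128])
z_dist = get_z(h, latent_shape=32, name="z")
z = z_dist.sample()  # shape [batch, 32]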
Example #5
def _get_class_labels_and_predictions(labels, logits, num_classes,
                                      multi_label):
    """Returns lists of per-class labels and per-class predictions.

    Args:
      labels: A `Tensor` of size [n, k]. In the multi-label case, values are
        either 0 or 1 and k = num_classes. Otherwise, k = 1 and values are in
        [0, num_classes).
      logits: A `Tensor` of size [n, num_classes] representing the logits of
        each example and semantic class.
      num_classes: Number of classes.
      multi_label: Boolean which defines if we are in a multi-label setting,
        where examples can have multiple labels, or not.

    Returns:
      class_labels: List of size num_classes, where each entry is a float
        `Tensor` with values of 0 or 1 representing the ground truth labels.
      class_predictions: List of size num_classes, where each entry is a float
        `Tensor` with values of 0 or 1 representing the predicted labels.
    """
    class_predictions = [None] * num_classes
    if multi_label:
        # One [n, 1] slice per class; a positive logit predicts the class.
        class_labels = tf.split(labels, num_or_size_splits=num_classes, axis=1)
        class_logits = tf.split(logits, num_or_size_splits=num_classes, axis=1)
        for c in range(num_classes):
            class_predictions[c] = tf.cast(tf.greater(class_logits[c], 0),
                                           dtype=tf.float32)
    else:
        # Single-label case: the predicted class is the argmax over logits.
        class_predictions_flat = tf.argmax(logits, 1)
        class_labels = [None] * num_classes
        for c in range(num_classes):
            class_labels[c] = tf.cast(tf.equal(labels, c), dtype=tf.float32)
            class_predictions[c] = tf.cast(tf.equal(class_predictions_flat, c),
                                           dtype=tf.float32)
    return class_labels, class_predictions
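A toy multi-label call with 3 examples and 2 classes:

import tensorflow as tf

labels = tf.constant([[1, 0], [0, 1], [1, 1]])
logits = tf.constant([[2.0, -1.0], [-0.5, 3.0], [1.0, 1.0]])
class_labels, class_predictions = _get_class_labels_and_predictions(
    labels, logits, num_classes=2, multi_label=True)
# class_predictions[0] == [[1.], [0.], [1.]]: a positive logit predicts class 0.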
Example #6
  def call(self, observation, step_type=(), network_state=()):
    del step_type  # unused.
    output = tf.cast(tf.nest.flatten(observation)[0], tf.float32)
    for layer in self._mlp_layers:
      output = layer(output)

    shift, log_scale_diag = tf.split(output, 2, axis=-1)
    log_scale_diag = tf.clip_by_value(log_scale_diag, -20, 2)

    base_distribution = tfp.distributions.MultivariateNormalDiag(
        loc=shift, scale_diag=tf.exp(log_scale_diag))
    distribution = SquashToSpecDistribution(
        base_distribution, self._single_action_spec)

    distribution = tf.nest.pack_sequence_as(self.output_spec, [distribution])
    return distribution, network_state
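The split-into-shift-and-log-scale pattern above can be exercised standalone; a minimal sketch without the project-specific `SquashToSpecDistribution`:

import tensorflow as tf
import tensorflow_probability as tfp

output = tf.random.normal([4, 6])  # stand-in for the MLP output
shift, log_scale_diag = tf.split(output, 2, axis=-1)
log_scale_diag = tf.clip_by_value(log_scale_diag, -20, 2)
dist = tfp.distributions.MultivariateNormalDiag(
    loc=shift, scale_diag=tf.exp(log_scale_diag))
sample = dist.sample()  # shape [4, 3]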
Example #7
def preprocess_spatial_observation(input_obs, spec,
                                   categorical_embedding_dims=16,
                                   non_categorical_scaling='log'):
    with tf.name_scope('preprocess_spatial_obs'):
        # Split the observation into one tensor per feature layer.
        features = Lambda(lambda x: tf.split(x, x.get_shape()[1], axis=1))(input_obs)

        for f in spec.features:
            if f.is_categorical:
                # Embed categorical layers, then restore channels-first order.
                features[f.index] = Lambda(lambda x: tf.squeeze(x, axis=1))(features[f.index])
                features[f.index] = Embedding(f.scale, categorical_embedding_dims)(features[f.index])
                features[f.index] = Permute((3, 1, 2))(features[f.index])
            else:
                # Scale numeric layers logarithmically or by their max value.
                if non_categorical_scaling == 'log':
                    features[f.index] = Lambda(lambda x: tf.log(x + 1e-10))(features[f.index])
                elif non_categorical_scaling == 'normalize':
                    features[f.index] = Lambda(lambda x: x / f.scale)(features[f.index])

    return features
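A hedged sketch of the expected inputs, assuming a PySC2-style `spec` whose `features` entries expose `index`, `scale`, and `is_categorical`, and a channels-first observation with one 64x64 plane per feature:

from tensorflow.keras.layers import Input

input_obs = Input(shape=(len(spec.features), 64, 64))
features = preprocess_spatial_observation(input_obs, spec)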
Example #8
def preprocess_observation(input_obs, spec):
    if spec.is_spatial:
        features = Lambda(
            lambda x: tf.split(x, x.get_shape()[1], axis=1))(input_obs)

        for f in spec.features:
            if f.type == FeatureType.CATEGORICAL:
                features[f.index] = Lambda(
                    lambda x: one_hot_encode(x, f.scale))(
                        features[f.index])
            else:
                features[f.index] = Lambda(lambda x: x / f.scale)(
                    features[f.index])

        return features
    else:
        return input_obs
Example #9
def fit_gaussian_mixture(embeddings,
                         responsibilities,
                         damping=1e-7,
                         full_covariance=False):
  """Fits a unimodal Gaussian distribution `embeddings`.

  Args:
    embeddings: A [batch_size, embedding_dim] tf.Tensor of embeddings.
    responsibilities: The per-component responsibilities.
    damping: The scale of the covariance damping coefficient.
    full_covariance: Whether to use a full or diagonal covariance.

  Returns:
    Parameter estimates for a Gaussian mixture model.
  """

  # Recover scalar batch size and embedding dim from the shape tensor.
  num, dim = tf.split(tf.shape(input=embeddings), num_or_size_splits=2)
  num, dim = tf.squeeze(num), tf.squeeze(dim)
  num_classes = responsibilities.shape[1]

  mixing_proportion = tf.einsum('jk->k', responsibilities)
  mixing_proportion /= tf.cast(num, dtype=tf.float32)
  mixing_logits = tf.math.log(mixing_proportion)

  sample_mean = tf.einsum('ij,ik->jk', responsibilities, embeddings)
  sample_mean /= tf.reduce_sum(
      input_tensor=responsibilities, axis=0)[:, tf.newaxis]
  centered_embeddings = (
      embeddings[:, tf.newaxis, :] - sample_mean[tf.newaxis, :, :])

  if full_covariance:
    sample_covariance = tf.einsum('ijk,ijl->ijkl', centered_embeddings,
                                  centered_embeddings)  # Outer product.
    sample_covariance += damping * tf.eye(dim)  # Positive definiteness.
    weighted_covariance = tf.einsum('ij,ijkl->jkl', responsibilities,
                                    sample_covariance)
    weighted_covariance /= tf.reduce_sum(
        input_tensor=responsibilities, axis=0)[:, tf.newaxis, tf.newaxis]

    return (
        _split_and_squeeze(sample_mean, num_splits=num_classes),
        _split_and_squeeze(weighted_covariance, num_splits=num_classes),
        [mixing_logits],
    )
  else:
    avg_x_squared = (
        tf.matmul(responsibilities, embeddings**2, transpose_a=True) /
        tf.reduce_sum(input_tensor=responsibilities, axis=0)[:, tf.newaxis])
    avg_means_squared = sample_mean**2
    avg_x_means = (
        sample_mean *
        tf.matmul(responsibilities, embeddings, transpose_a=True) /
        tf.reduce_sum(input_tensor=responsibilities, axis=0)[:, tf.newaxis])
    sample_variances = (
        avg_x_squared - 2 * avg_x_means + avg_means_squared +
        damping * tf.ones(dim))
    log_variances = tf.math.log(sample_variances)
    return (
        _split_and_squeeze(sample_mean, num_splits=num_classes),
        _split_and_squeeze(log_variances, num_splits=num_classes),
        [mixing_logits],
    )
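An illustrative call, assuming the `_split_and_squeeze` helper from the next example is in scope:

import tensorflow as tf

embeddings = tf.random.normal([32, 8])
responsibilities = tf.nn.softmax(tf.random.normal([32, 3]), axis=-1)
means, log_variances, mixing_logits = fit_gaussian_mixture(
    embeddings, responsibilities)  # per-component estimates for 3 components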
Example #10
def _split_and_squeeze(tensor, num_splits, axis=0):
  # Split along `axis`, then squeeze the size-1 dimensions out of each piece.
  return [
      tf.squeeze(t)
      for t in tf.split(tensor, axis=axis, num_or_size_splits=num_splits)
  ]
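A quick demonstration: splitting a [3, 4] tensor along axis 0 yields three [4] vectors.

import tensorflow as tf

params = tf.reshape(tf.range(12.0), [3, 4])
vectors = _split_and_squeeze(params, num_splits=3)  # three tensors of shape [4]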
Example #11
def _split_mode_params(params):
  return [
      tf.squeeze(p)
      for p in tf.split(params, axis=0, num_or_size_splits=self.num_modes)
  ]
Example #12
    def discriminator(self, z):
        probabilities = self.discriminator_net(z)

        # Keep only the first of the two halves split from the network output.
        return tf.split(probabilities, 2, axis=-1)[0]