Example #1
def ar_func(data):
    kmer_nn_0 = kmer_scale0*_normalize_layer(tf.nn.conv1d(data, filters, 1, 'VALID')) + kmer_intercept0
    kmer_nn_1 = (kmer_scale1*_normalize_layer(
        tf.tensordot(tf.nn.elu(kmer_nn_0), kmer_weights1, axes=[[-2, -1], [0, 1]]))
                 + kmer_intercept1)
    kmer_nn_2 = tf.tensordot(tf.nn.elu(kmer_nn_1), kmer_weights2, axes=[[-1], [0]]) + kmer_intercept2
    return tf.nn.softmax(kmer_nn_2)
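A minimal shape sketch of the `axes=[[-2, -1], [0, 1]]` contraction above (the shapes are hypothetical; `filters`, the `kmer_weights*` tensors, and the `kmer_scale*`/`kmer_intercept*` constants come from the enclosing scope):

import tensorflow as tf

# Contract the last two axes of x against the first two axes of w.
x = tf.random.normal([8, 16, 32])   # e.g. (batch, positions, channels)
w = tf.random.normal([16, 32, 64])
y = tf.tensordot(x, w, axes=[[-2, -1], [0, 1]])
print(y.shape)  # (8, 64)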
Example #2
    def testBayesianLinearModel(self):
        """Tests that model makes reasonable predictions."""
        np.random.seed(42)
        train_batch_size = 5
        test_batch_size = 2
        num_features = 3
        noise_variance = 0.01
        coeffs = tf.range(num_features, dtype=tf.float32)
        features = tf.cast(np.random.randn(train_batch_size, num_features),
                           dtype=tf.float32)
        noise = tf.cast(np.random.randn(train_batch_size), dtype=tf.float32)
        labels = (tf.tensordot(features, coeffs, [[-1], [0]]) +
                  noise_variance * noise)

        model = ed.layers.BayesianLinearModel(noise_variance=noise_variance)
        model.fit(features, labels)

        test_features = np.random.randn(test_batch_size,
                                        num_features).astype(np.float32)
        test_labels = tf.tensordot(test_features, coeffs, [[-1], [0]])
        outputs = model(test_features)
        test_predictions = outputs.distribution.mean()
        test_predictions_variance = outputs.distribution.variance()

        self.assertEqual(test_predictions.shape, (test_batch_size,))
        self.assertEqual(test_predictions_variance.shape, (test_batch_size,))
        self.assertAllClose(test_predictions, test_labels, atol=0.1)
        self.assertAllLessEqual(test_predictions_variance, noise_variance)
Example #3
def f(a, b):  # pylint: disable=missing-docstring
    return utils.cond(
        utils.logical_or(tf.rank(a) == 0,
                         tf.rank(b) == 0),
        lambda: a * b,
        lambda: utils.cond(  # pylint: disable=g-long-lambda
            tf.rank(b) == 1, lambda: tf.tensordot(a, b, axes=[[-1], [-1]]),
            lambda: tf.tensordot(a, b, axes=[[-1], [-2]])))
Example #4
def f(x1, x2):
  try:
    return utils.cond(tf.rank(x2) == 1,
                      lambda: tf.tensordot(x1, x2, axes=1),
                      lambda: utils.cond(tf.rank(x1) == 1,  # pylint: disable=g-long-lambda
                                         lambda: tf.tensordot(  # pylint: disable=g-long-lambda
                                             x1, x2, axes=[[0], [-2]]),
                                         lambda: tf.matmul(x1, x2)))
  except tf.errors.InvalidArgumentError as err:
    six.reraise(ValueError, ValueError(str(err)), sys.exc_info()[2])
Example #5
def gabor_impulse_response(t: tf.Tensor, center: tf.Tensor,
                           fwhm: tf.Tensor) -> tf.Tensor:
    """Computes the gabor impulse response."""
    denominator = 1.0 / (tf.math.sqrt(2.0 * math.pi) * fwhm)
    gaussian = tf.exp(tf.tensordot(1.0 / (2. * fwhm**2), -t**2, axes=0))
    center_frequency_complex = tf.cast(center, tf.complex64)
    t_complex = tf.cast(t, tf.complex64)
    sinusoid = tf.math.exp(
        1j * tf.tensordot(center_frequency_complex, t_complex, axes=0))
    denominator = tf.cast(denominator, dtype=tf.complex64)[:, tf.newaxis]
    gaussian = tf.cast(gaussian, dtype=tf.complex64)
    return denominator * sinusoid * gaussian
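A hedged usage sketch: with `axes=0`, `tf.tensordot` forms an outer product, so each filter's bandwidth and center frequency are broadcast against every time step. The shapes below are assumptions for illustration:

import tensorflow as tf

t = tf.linspace(-1.0, 1.0, 400)           # 400 time samples
center = tf.constant([0.1, 0.25, 0.5])    # 3 center frequencies
fwhm = tf.constant([10.0, 20.0, 40.0])    # 3 bandwidths
ir = gabor_impulse_response(t, center, fwhm)
print(ir.shape)  # (3, 400): one complex impulse response per filter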
Example #6
def true_log_joint(features, prior_precision, w, y):
    log_prob = tf.reduce_sum(input_tensor=tfd.Normal(
        loc=0., scale=tf.math.rsqrt(prior_precision)).log_prob(w))
    log_prob += tf.reduce_sum(input_tensor=tfd.Normal(
        loc=tf.tensordot(features, w, [[1], [0]]), scale=1.).log_prob(y))
    return log_prob
Example #7
def boolean_mask(boxlist, indicator, fields=None, scope=None,
                 use_static_shapes=False, indicator_sum=None):
  """Select boxes from BoxList according to indicator and return new BoxList.

  `boolean_mask` returns the subset of boxes that are marked as "True" by the
  indicator tensor. By default, `boolean_mask` returns boxes corresponding to
  the input index list, as well as all additional fields stored in the boxlist
  (indexing into the first dimension).  However, one can optionally draw
  from only a subset of fields.

  Args:
    boxlist: BoxList holding N boxes
    indicator: a rank-1 boolean tensor
    fields: (optional) list of fields to also gather from.  If None (default),
      all fields are gathered from.  Pass an empty fields list to only gather
      the box coordinates.
    scope: name scope.
    use_static_shapes: Whether to use an implementation with static shape
      guarantees.
    indicator_sum: An integer containing the sum of the `indicator` vector.
      Only required if `use_static_shapes` is True.

  Returns:
    subboxlist: a BoxList corresponding to the subset of the input BoxList
      specified by indicator
  Raises:
    ValueError: if `indicator` is not a rank-1 boolean tensor.
  """
  with tf.name_scope(scope, 'BooleanMask'):
    if indicator.shape.ndims != 1:
      raise ValueError('indicator should have rank 1')
    if indicator.dtype != tf.bool:
      raise ValueError('indicator should be a boolean tensor')
    if use_static_shapes:
      if not (indicator_sum and isinstance(indicator_sum, int)):
        raise ValueError('`indicator_sum` must be of type int')
      selected_positions = tf.cast(indicator, dtype=tf.float32)
      indexed_positions = tf.cast(
          tf.multiply(
              tf.cumsum(selected_positions), selected_positions),
          dtype=tf.int32)
      one_hot_selector = tf.one_hot(
          indexed_positions - 1, indicator_sum, dtype=tf.float32)
      sampled_indices = tf.cast(
          tf.tensordot(
              tf.cast(tf.range(tf.shape(indicator)[0]), dtype=tf.float32),
              one_hot_selector,
              axes=[0, 0]),
          dtype=tf.int32)
      return gather(boxlist, sampled_indices, use_static_shapes=True)
    else:
      subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
      if fields is None:
        fields = boxlist.get_extra_fields()
      for field in fields:
        if not boxlist.has_field(field):
          raise ValueError('boxlist must contain all specified fields')
        subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
        subboxlist.add_field(field, subfieldlist)
      return subboxlist
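The static-shape branch is a gather built out of `tf.tensordot`: a cumulative sum numbers the kept rows, `tf.one_hot` turns those numbers into a selection matrix, and the dot product with `tf.range` recovers the selected indices. A self-contained sketch of that trick on a plain tensor (no BoxList involved):

import tensorflow as tf

indicator = tf.constant([True, False, True, True, False])
indicator_sum = 3  # must be known statically

selected = tf.cast(indicator, tf.float32)
indexed = tf.cast(tf.cumsum(selected) * selected, tf.int32)  # [1, 0, 2, 3, 0]
one_hot = tf.one_hot(indexed - 1, indicator_sum, dtype=tf.float32)
indices = tf.cast(
    tf.tensordot(tf.cast(tf.range(5), tf.float32), one_hot, axes=[0, 0]),
    tf.int32)
print(indices)  # [0 2 3]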
Example #8
    def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
                                       num_end_samples, total_num_samples):
        """slices num_start_samples and last num_end_samples from input_tensor.

    Args:
      input_tensor: An int32 tensor of shape [N] to be sliced.
      num_start_samples: Number of examples to be sliced from the beginning
        of the input tensor.
      num_end_samples: Number of examples to be sliced from the end of the
        input tensor.
      total_num_samples: Sum of is num_start_samples and num_end_samples. This
        should be a scalar.

    Returns:
      A tensor containing the first num_start_samples and last num_end_samples
      from input_tensor.

    """
        input_length = tf.shape(input=input_tensor)[0]
        start_positions = tf.less(tf.range(input_length), num_start_samples)
        end_positions = tf.greater_equal(tf.range(input_length),
                                         input_length - num_end_samples)
        selected_positions = tf.logical_or(start_positions, end_positions)
        selected_positions = tf.cast(selected_positions, tf.float32)
        indexed_positions = tf.multiply(tf.cumsum(selected_positions),
                                        selected_positions)
        one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
                                      total_num_samples,
                                      dtype=tf.float32)
        return tf.cast(
            tf.tensordot(tf.cast(input_tensor, tf.float32),
                         one_hot_selector,
                         axes=[0, 0]), tf.int32)
Example #9
def dense(inputs, kernel, bias=None, activation=None, dtype=None):
  """Densely connected NN layer op.

  Args:
    inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.
    kernel: `tf.Variable`. Matrix kernel.
    bias: (Optional) `tf.Variable`. Bias to add to outputs.
    activation: (Optional) 1-argument callable. Activation function to apply to
      outputs.
    dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to.

  Returns:
    `tf.Tensor`. Output of dense connection.
  """
  if dtype:
    if inputs.dtype.base_dtype != dtype.base_dtype:
      inputs = tf.cast(inputs, dtype=dtype)

  rank = inputs.shape.rank
  if rank == 2 or rank is None:
    # We use embedding_lookup_sparse as a more efficient matmul operation for
    # large sparse input tensors. The op will result in a sparse gradient, as
    # opposed to sparse_ops.sparse_tensor_dense_matmul which results in dense
    # gradients. This can lead to significant speedups, see b/171762937.
    if isinstance(inputs, tf.SparseTensor):
      # We need to fill empty rows, as the op assumes at least one id per row.
      inputs, _ = tf.sparse.fill_empty_rows(inputs, 0)
      # We need to do some munging of our input to use the embedding lookup as
      # a matrix multiply. We split our input matrix into separate ids and
      # weights tensors. The values of the ids tensor should be the column
      # indices of our input matrix, and the values of the weights tensor can
      # continue to be the actual matrix weights. The column arrangement of ids
      # and weights will be summed over and does not matter. See the
      # documentation for sparse_ops.sparse_tensor_dense_matmul for a more
      # detailed explanation of the inputs to both ops.
      ids = tf.SparseTensor(
          indices=inputs.indices,
          values=inputs.indices[:, 1],
          dense_shape=inputs.dense_shape)
      weights = inputs
      outputs = tf.nn.embedding_lookup_sparse(
          kernel, ids, weights, combiner="sum")
    else:
      outputs = tf.raw_ops.MatMul(a=inputs, b=kernel)
  # Broadcast kernel to inputs.
  else:
    outputs = tf.tensordot(inputs, kernel, [[rank - 1], [0]])
    # Reshape the output back to the original ndim of the input.
    if not tf.executing_eagerly():
      shape = inputs.shape.as_list()
      output_shape = shape[:-1] + [kernel.shape[-1]]
      outputs.set_shape(output_shape)

  if bias is not None:
    outputs = tf.nn.bias_add(outputs, bias)

  if activation is not None:
    outputs = activation(outputs)

  return outputs
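A usage sketch for the rank > 2 path, where `tf.tensordot` contracts the last input axis against the kernel's first axis (the shapes are illustrative assumptions):

import tensorflow as tf

inputs = tf.random.normal([4, 10, 32])  # (batch, time, features)
kernel = tf.Variable(tf.random.normal([32, 64]))
bias = tf.Variable(tf.zeros([64]))
out = dense(inputs, kernel, bias=bias, activation=tf.nn.relu)
print(out.shape)  # (4, 10, 64)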
Example #10
def logistic_regression(features):
  """Bayesian logistic regression, which returns labels given features."""
  coeffs = ed.MultivariateNormalDiag(
      loc=tf.zeros(features.shape[1]), name="coeffs")
  labels = ed.Bernoulli(
      logits=tf.tensordot(features, coeffs, [[1], [0]]), name="labels")
  return labels
Example #11
def linear_regression(features, prior_precision):
    w = ed.Normal(loc=0.,
                  scale=tf.math.rsqrt(prior_precision),
                  sample_shape=features.shape[1],
                  name="w")
    y = ed.Normal(loc=tf.tensordot(features, w, [[1], [0]]),
                  scale=1.,
                  name="y")
    return y
Example #12
def _mean(self):
    probs = self.probs
    outcomes = self.outcomes
    if dtype_util.is_integer(outcomes.dtype):
        if self._validate_args:
            outcomes = dist_util.embed_check_integer_casting_closed(
                outcomes, target_dtype=probs.dtype)
        outcomes = tf.cast(outcomes, dtype=probs.dtype)
    return tf.tensordot(outcomes, probs, axes=[[0], [-1]])
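What the final `tensordot` computes, as a standalone sketch: the expectation sum_k probs[..., k] * outcomes[k], contracting the outcomes axis against the last (category) axis of `probs` so that any batch dimensions of `probs` survive. The values below are hypothetical:

import tensorflow as tf

outcomes = tf.constant([0., 1., 2.])          # support of the distribution
probs = tf.constant([[0.2, 0.5, 0.3],
                     [0.6, 0.3, 0.1]])        # batch of 2 PMFs
mean = tf.tensordot(outcomes, probs, axes=[[0], [-1]])
print(mean)  # [1.1 0.5]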
Example #13
    def linear(self, x):
        """Computes logits by running x through a linear layer.

    Args:
      x: A float32 tensor with shape [..., hidden_size]
    Returns:
      float32 tensor with shape [..., vocab_size].
    """
        with tf.compat.v1.name_scope("presoftmax_linear"):
            logits = tf.tensordot(x, self.word_embeddings, [[-1], [1]])
        return logits
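The `[[-1], [1]]` axes tie the output projection to the embedding table: the contraction runs over the table's hidden axis, leaving the vocabulary axis as the logits. A shape sketch under assumed sizes:

import tensorflow as tf

word_embeddings = tf.random.normal([32000, 512])  # (vocab_size, hidden_size)
x = tf.random.normal([2, 10, 512])                # (batch, length, hidden_size)
logits = tf.tensordot(x, word_embeddings, [[-1], [1]])
print(logits.shape)  # (2, 10, 32000)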
Example #14
    def _state_gradient(self, zero_and_constraints, distribution):
        """Returns the gradient to apply to the internal state."""
        if self._regret_type == _EXTERNAL_REGRET_TYPE:
            return tf.cast(zero_and_constraints, distribution.dtype)
        else:
            # This assertion should always succeed, since we check regret_type in the
            # constructor.
            assert self._regret_type == _SWAP_REGRET_TYPE

            return tf.tensordot(tf.cast(zero_and_constraints,
                                        distribution.dtype),
                                distribution,
                                axes=0)
Example #15
def compute_mel_from_mag(mag,
                         sample_rate=16000,
                         lo_hz=0.0,
                         hi_hz=8000.0,
                         bins=64,
                         fft_size=2048):
    num_spectrogram_bins = tf.cast(tf.shape(mag)[-1], tf.int32)
    if not bins:
        bins = int(fft_size / 4) + 1
    linear_to_mel_matrix = tf.signal.linear_to_mel_weight_matrix(
        bins, num_spectrogram_bins, sample_rate, lo_hz, hi_hz)
    mel = tf.tensordot(mag, linear_to_mel_matrix, 1)
    mel.set_shape(mag.shape[:-1].concatenate(linear_to_mel_matrix.shape[-1:]))
    return mel
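A hedged usage example, mapping the magnitude of an STFT onto the default 64 mel bins (the signal and STFT parameters are assumptions):

import tensorflow as tf

mag = tf.abs(tf.signal.stft(tf.random.normal([16000]),
                            frame_length=2048, frame_step=512))
mel = compute_mel_from_mag(mag[tf.newaxis, ...])  # add a batch axis
print(mel.shape)  # (1, num_frames, 64)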
Example #16
def blur(mag):
    """blurr  a batch of spectrograms

  Args:
    mag (tf.Tensor): (batch, time, frequencies)

  Returns:
    blurred_mag (tf.Tensor): (batch, time, frequencies)
  """
    n_freqs = mag.shape[-1]
    freqs = tf.cast(tf.linspace(0.0, 1.0, n_freqs), tf.float32)
    dist = tf.abs(freqs[tf.newaxis] - freqs[:, tf.newaxis])
    sim = 1 - dist
    blurred = tf.tensordot(mag, sim, [[-1], [0]])
    return blurred
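For instance (illustrative shapes), each output frequency bin becomes a similarity-weighted sum over all input bins:

import tensorflow as tf

mag = tf.random.uniform([2, 100, 128])  # (batch, time, frequencies)
blurred = blur(mag)
print(blurred.shape)  # (2, 100, 128)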
Example #17
def compute_mfcc_from_mag(
    mag,
    lo_hz=20.0,
    hi_hz=8000.0,
    mel_bins=128,
    mfcc_bins=13,
):
    """Calculate Mel Spectrogram."""
    num_spectrogram_bins = int(mag.shape[-1])
    linear_to_mel_matrix = tf.signal.linear_to_mel_weight_matrix(
        mel_bins, num_spectrogram_bins, 16000, lo_hz, hi_hz)
    mel = tf.tensordot(mag, linear_to_mel_matrix, 1)
    mel.set_shape(mag.shape[:-1].concatenate(linear_to_mel_matrix.shape[-1:]))
    logmel = safe_log(mel)
    mfccs = tf.signal.mfccs_from_log_mel_spectrograms(logmel)
    return mfccs[..., :mfcc_bins]
Example #18
def dot(a, b):
    """The dot product of two arrays. See numpy.dot for more details.

  This relies on `tf.tensordot` which does not support types int64 and float64.
  So arrays of those types are "unsafely" cast to int32 and float32.

  Args:
    a: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.
    b: array_like. Could be an ndarray, a Tensor or any object that can
      be converted to a Tensor using `tf.convert_to_tensor`.

  Returns:
    An ndarray.
  """
    a, b = array_creation.promote_args_types(a, b)
    if utils.isscalar(a) or utils.isscalar(b):
        a = utils.tensor_to_ndarray(tf.expand_dims(a.data, -1))
        b = utils.tensor_to_ndarray(tf.expand_dims(b.data, -1))
        a_axis = b_axis = -1
    else:
        a_axis = -1
        # TODO(agarwal): handle ndim being None when in graph mode.
        b_axis = -2 if b.ndim > 1 else -1
    # TODO(srbs): When the shape of the output is a scalar e.g. when performing
    # a dot-product of two vectors, numpy returns a scalar object and not an
    # instance of ndarray.

    # tensordot/MatMul does not support int64 and float64 so we manually cast to
    # the compatible types. The conversion may be unsafe.
    # TODO(srbs): Figure out why MatMul does not support larger types.
    output_type = None
    if a.dtype == np.int64:
        logging.warning("Unsafe cast to int32.")
        a = utils.tensor_to_ndarray(tf.cast(a.data, tf.int32))
        b = utils.tensor_to_ndarray(tf.cast(b.data, tf.int32))
        output_type = tf.int64
    elif a.dtype == np.float64:
        logging.warning("Unsafe cast to float32.")
        a = utils.tensor_to_ndarray(tf.cast(a.data, tf.float32))
        b = utils.tensor_to_ndarray(tf.cast(b.data, tf.float32))
        output_type = tf.float64

    result_t = tf.tensordot(a.data, b.data, [[a_axis], [b_axis]])
    if output_type:
        result_t = tf.cast(result_t, output_type)
    return utils.tensor_to_ndarray(result_t)
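The axis choice mirrors `numpy.dot`: for `b` of rank > 1, contraction is against `b`'s second-to-last axis. A sketch with plain tensors (the `utils`/`array_creation` helpers above are module-internal):

import tensorflow as tf

a = tf.random.normal([3, 4])
b = tf.random.normal([5, 4, 6])
# numpy.dot contracts a's last axis with b's second-to-last axis:
out = tf.tensordot(a, b, axes=[[-1], [-2]])
print(out.shape)  # (3, 5, 6)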
Example #19
    def call(self,
             input_ids,
             seq_length,
             start_pos=0,
             token_type_ids=None,
             training=None):
        if input_ids is None:
            return None

        # subtoken embedding
        output = tf.nn.embedding_lookup(params=self.word_embeddings,
                                        ids=input_ids)

        if self.scale_emb:
            output = output * self.emb_dim**0.5

        if self.token_type_table is not None:
            # This vocab will be small so we always do one-hot here, since it is
            # always faster for a small vocabulary.
            one_hot_ids = tf.one_hot(token_type_ids,
                                     depth=self.num_token_types)
            token_type_embeddings = tf.tensordot(one_hot_ids,
                                                 self.token_type_table, 1)
            output += token_type_embeddings

        if self.position_embeddings is not None:
            # assert_op = tf.compat.v1.assert_less_equal(
            #     start_pos + seq_length, self.max_position_embeddings)
            # with tf.control_dependencies([assert_op]):
            # So `position_embeddings` is effectively an embedding table for
            # position [0, 1, 2, ..., max_position_embeddings-1], and the current
            # sequence has positions [0, 1, 2, ... seq_length-1], so we can just
            # perform a slice.
            position_embeddings = tf.slice(self.position_embeddings,
                                           [start_pos, 0],
                                           [seq_length, self.emb_dim])
            output += tf.expand_dims(position_embeddings, axis=0)

        if training and self.dropout_prob > 0:
            output = tf.nn.dropout(output, self.dropout_prob)
        return output
Example #20
def dense(inputs, kernel, bias=None, activation=None, dtype=None):
    """Densely connected NN layer op.

  Args:
    inputs: `tf.Tensor` or `tf.SparseTensor`. Inputs to operation.
    kernel: `tf.Variable`. Matrix kernel.
    bias: (Optional) `tf.Variable`. Bias to add to outputs.
    activation: (Optional) 1-argument callable. Activation function to apply to
      outputs.
    dtype: (Optional) `tf.DType`. Dtype to cast `inputs` to.

  Returns:
    `tf.Tensor`. Output of dense connection.
  """
    if dtype:
        if inputs.dtype.base_dtype != dtype.base_dtype:
            inputs = tf.cast(inputs, dtype=dtype)

    rank = inputs.shape.rank
    if rank == 2 or rank is None:
        if isinstance(inputs, tf.SparseTensor):
            outputs = tf.sparse.sparse_dense_matmul(inputs, kernel)
        else:
            outputs = tf.raw_ops.MatMul(a=inputs, b=kernel)
    # Broadcast kernel to inputs.
    else:
        outputs = tf.tensordot(inputs, kernel, [[rank - 1], [0]])
        # Reshape the output back to the original ndim of the input.
        if not tf.executing_eagerly():
            shape = inputs.shape.as_list()
            output_shape = shape[:-1] + [kernel.shape[-1]]
            outputs.set_shape(output_shape)

    if bias is not None:
        outputs = tf.nn.bias_add(outputs, bias)

    if activation is not None:
        outputs = activation(outputs)

    return outputs
Example #21
def _matmul(self, inputs, kernel):
    if inputs.shape.ndims <= 2:
        return tf.matmul(inputs, kernel)
    # To handle broadcasting, we must use `tensordot`.
    return tf.tensordot(inputs, kernel, axes=[[-1], [0]])
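The case this guards against, as a quick sketch: batched `tf.matmul` expects matching batch dimensions on both operands, while `tensordot` over the last/first axes applies a plain 2-D kernel to every leading index (hypothetical shapes):

import tensorflow as tf

inputs = tf.random.normal([4, 7, 16])  # rank 3: takes the tensordot path
kernel = tf.random.normal([16, 8])
out = tf.tensordot(inputs, kernel, axes=[[-1], [0]])
print(out.shape)  # (4, 7, 8)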
Example #22
def f(a, b):
    return utils.cond(utils.logical_or(tf.rank(a) == 0,
                                       tf.rank(b) == 0), lambda: a * b,
                      lambda: tf.tensordot(a, b, axes=[[-1], [-1]]))