Example #1
  def FProp(self, theta, inputs):
    """Apply projection to inputs.

    Args:
      theta: A NestedMap object containing weights' values of this layer and its
        children layers.
      inputs: The inputs tensor.  Shaped [..., input_dims].

    Returns:
      Projected inputs.
    """
    p = self.params
    with tf.name_scope(p.name):
      computation_cost.Add(
          self, 'flops',
          tf.reduce_prod(tf.to_int64(tf.shape(inputs)[:-1])) * tf.to_int64(
              symbolic.EvalExpr(symbolic.TENSOR_VALUES,
                                p.input_dims * p.output_dims)) * 2)
      use_tpu = py_utils.use_tpu()
      if use_tpu and inputs.shape.rank is not None and inputs.shape.rank < 26:
        # Avoids reshape if feasible and uses Einsum.
        if inputs.shape.rank == 2:
          return tf.matmul(inputs, theta.w)
        else:
          s = ''.join([chr(x) for x in range(97, 123)])  # abc...xyz
          r = inputs.shape.rank
          return tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, theta.w)

      input_dim = py_utils.GetShape(inputs)[-1]
      act = tf.matmul(tf.reshape(inputs, [-1, input_dim]), theta.w)
      output_dim = tf.shape(theta.w)[-1]
      act = tf.reshape(act,
                       tf.concat([tf.shape(inputs)[:-1], [output_dim]], axis=0))
      return act
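A hedged, standalone check of the einsum trick above (plain TF2 tensors, illustrative shapes, no Lingvo layer machinery): for a rank-3 input the generated equation is 'aby,yz->abz', which matches the reshape-matmul-reshape fallback.

import tensorflow as tf

inputs = tf.random.normal([4, 7, 16])   # [batch, time, input_dims]
w = tf.random.normal([16, 32])          # [input_dims, output_dims]

s = ''.join(chr(x) for x in range(97, 123))  # 'abc...xyz'
r = inputs.shape.rank
proj = tf.einsum('{0}y,yz->{0}z'.format(s[:r - 1]), inputs, w)

# Same result via the reshape + matmul fallback.
ref = tf.reshape(tf.matmul(tf.reshape(inputs, [-1, 16]), w), [4, 7, 32])
print(tf.reduce_max(tf.abs(proj - ref)).numpy())  # ~0.0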
Example #2
 def _iter_body(i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step):
     mat_m_i = (1 - alpha) * identity + alpha * mat_m
     new_mat_m = tf.matmul(mat_power(mat_m_i, exponent), mat_m)
     new_mat_h = tf.matmul(mat_h, mat_m_i)
     new_error = tf.reduce_max(tf.abs(new_mat_m - identity))
     return (i + 1, new_mat_m, new_mat_h, mat_h, new_error,
             new_error < error)
Example #3
        def _Atten(query, key, val):
            """Returns weighted val based on dot-attention between query and key."""
            b, n, _ = py_utils.GetShape(query)

            # Query.
            query = py_utils.HasShape(query, [b, n, hdims])
            query = tf.reshape(query, [b, n, heads, hdims // heads])
            query = tf.transpose(query, [0, 2, 1, 3])

            # Key.
            key = py_utils.HasShape(key, [b, n, hdims])
            key = tf.reshape(key, [b, n, heads, hdims // heads])
            key = tf.transpose(key, [0, 2, 1, 3])

            # query:[b, heads, n, hdims // heads]
            # key:  [b, heads, n, hdims // heads]^T
            dotp = tf.matmul(query, key, transpose_b=True)
            probs = tf.nn.softmax(dotp)
            probs = py_utils.HasShape(probs, [b, heads, n, n])

            # value (aka. context)
            val = py_utils.HasShape(val, [b, n, hdims])
            val = tf.reshape(val, [b, n, heads, hdims // heads])
            val = tf.transpose(val, [0, 2, 1, 3])
            val = py_utils.HasShape(val, [b, heads, n, hdims // heads])

            # Weighted average of value (context). [b, heads, n, hdims // heads]
            out = tf.matmul(probs, val)
            out = tf.transpose(out, [0, 2, 1, 3])
            out = tf.reshape(out, [b, n, hdims])
            return out
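A hedged, self-contained restatement of the pattern above (plain TF2, illustrative shapes, the py_utils shape checks omitted), showing the split-heads reshape/transpose plus matmul pipeline end to end:

import tensorflow as tf

b, n, heads, hdims = 2, 5, 4, 16
query = tf.random.normal([b, n, hdims])
key = tf.random.normal([b, n, hdims])
val = tf.random.normal([b, n, hdims])

def _SplitHeads(x):  # [b, n, hdims] -> [b, heads, n, hdims // heads]
    x = tf.reshape(x, [b, n, heads, hdims // heads])
    return tf.transpose(x, [0, 2, 1, 3])

dotp = tf.matmul(_SplitHeads(query), _SplitHeads(key), transpose_b=True)
probs = tf.nn.softmax(dotp)                      # [b, heads, n, n]
out = tf.matmul(probs, _SplitHeads(val))         # [b, heads, n, hdims // heads]
out = tf.reshape(tf.transpose(out, [0, 2, 1, 3]), [b, n, hdims])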
Example #4
def _MakeRotationMatrix(yaw, roll, pitch):
  """Create a 3x3 rotation matrix from yaw, roll, pitch (angles in radians).

  Note: Yaw -> Z, Roll -> X, Pitch -> Y.

  Args:
    yaw: float tensor representing a yaw angle in radians.
    roll: float tensor representing a roll angle in radians.
    pitch: float tensor representing a pitch angle in radians.

  Returns:
    A [3, 3] tensor corresponding to a rotation matrix.
  """

  # pyformat: disable
  def _UnitX(angle):
    return tf.reshape([1., 0., 0.,
                       0., tf.cos(angle), -tf.sin(angle),
                       0., tf.sin(angle), tf.cos(angle)],
                      shape=[3, 3])

  def _UnitY(angle):
    return tf.reshape([tf.cos(angle), 0., tf.sin(angle),
                       0., 1., 0.,
                       -tf.sin(angle), 0., tf.cos(angle)],
                      shape=[3, 3])

  def _UnitZ(angle):
    return tf.reshape([tf.cos(angle), -tf.sin(angle), 0.,
                       tf.sin(angle), tf.cos(angle), 0.,
                       0., 0., 1.],
                      shape=[3, 3])
  # pyformat: enable
  return tf.matmul(tf.matmul(_UnitZ(yaw), _UnitX(roll)), _UnitY(pitch))
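A hedged usage check with _MakeRotationMatrix as defined above (angles are illustrative): a 90-degree yaw rotates about Z, so it should map the +x axis onto +y.

import numpy as np
import tensorflow as tf

rot = _MakeRotationMatrix(yaw=np.pi / 2, roll=0., pitch=0.)
x_axis = tf.constant([[1.], [0.], [0.]])
print(tf.matmul(rot, x_axis).numpy().ravel())  # ~[0., 1., 0.]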
Example #5
def CoordinateTransform(points, pose):
  """Translate 'points' to coordinates according to 'pose' vector.

  pose should contain 6 floating point values:
    translate_x, translate_y, translate_z: The translation to apply.
    yaw, roll, pitch: The rotation angles in radians.

  Args:
    points: Float shape [..., 3]: Points to transform to new coordinates.
    pose: Float shape [6]: [translate_x, translate_y, translate_z, yaw, roll,
      pitch]. The pose in the frame that 'points' comes from, and the definition
      of the rotation and translation angles to apply to points.

  Returns:
    'points' transformed to the coordinates defined by 'pose'.
  """
  translate_x = pose[0]
  translate_y = pose[1]
  translate_z = pose[2]

  # Translate the points so the origin is the pose's center.
  translation = tf.reshape([translate_x, translate_y, translate_z], shape=[3])
  translated_points = points + translation

  # Define the rotation matrices for each of yaw, roll and pitch.
  #
  # The angle is expected to be in radians.
  # pyformat: disable
  def UnitX(angle):
    return tf.reshape([1., 0., 0.,
                       0., tf.cos(angle), -tf.sin(angle),
                       0., tf.sin(angle), tf.cos(angle)],
                      shape=[3, 3])

  def UnitY(angle):
    return tf.reshape([tf.cos(angle), 0., tf.sin(angle),
                       0., 1., 0.,
                       -tf.sin(angle), 0., tf.cos(angle)],
                      shape=[3, 3])

  def UnitZ(angle):
    return tf.reshape([tf.cos(angle), -tf.sin(angle), 0.,
                       tf.sin(angle), tf.cos(angle), 0.,
                       0., 0., 1.],
                      shape=[3, 3])
  # pyformat: enable

  # Compose the rotations along the three axes.
  #
  # Yaw->Z, Roll->X, Pitch->Y to match onboard logic:
  yaw = pose[3]
  roll = pose[4]
  pitch = pose[5]
  rotation_matrix = tf.matmul(tf.matmul(UnitZ(yaw), UnitX(roll)), UnitY(pitch))

  # Finally, rotate the points about the pose's origin according to the
  # rotation matrix.
  rotated_points = _BroadcastMatmul(translated_points, rotation_matrix)
  return rotated_points
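A hedged usage sketch (CoordinateTransform relies on _BroadcastMatmul from Example #22 below, and hence on Lingvo's py_utils): a pose that translates by (-1, 0, 0) maps the point (1, 0, 0) to the origin, where the subsequent rotation leaves it fixed.

import numpy as np
import tensorflow as tf

points = tf.constant([[1., 0., 0.]])
pose = tf.constant([-1., 0., 0., np.pi / 2, 0., 0.])  # translate, then yaw 90
print(CoordinateTransform(points, pose).numpy())  # ~[[0., 0., 0.]]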
Example #6
 def _iter_body(i, mat_y, unused_old_mat_y, mat_z, unused_old_mat_z, err,
                unused_old_err):
     """Iterative method to compute the square root of matrix."""
     current_iterate = 0.5 * (3.0 * identity - tf.matmul(mat_z, mat_y))
     current_mat_y = tf.matmul(mat_y, current_iterate)
     current_mat_z = tf.matmul(current_iterate, mat_z)
     # Compute the error in approximation.
     mat_sqrt_a = current_mat_y * tf.sqrt(norm)
     mat_a_approx = tf.matmul(mat_sqrt_a, mat_sqrt_a)
     residual = mat_a - mat_a_approx
     current_err = tf.sqrt(tf.reduce_sum(residual * residual)) / norm
     return i + 1, current_mat_y, mat_y, current_mat_z, mat_z, current_err, err
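The body above runs inside a tf.while_loop; identity, norm, and mat_a are closure variables. A hedged, self-contained sketch of the Newton-Schulz square-root iteration it implements (the trace normalization and fixed iteration count are illustrative assumptions, not the original's convergence logic):

import tensorflow as tf

a = tf.random.normal([4, 4])
mat_a = tf.matmul(a, a, transpose_b=True) + tf.eye(4)  # SPD input
norm = tf.linalg.trace(mat_a)        # normalize so the iteration converges
identity = tf.eye(4)
mat_y, mat_z = mat_a / norm, identity
for _ in range(20):
  t = 0.5 * (3.0 * identity - tf.matmul(mat_z, mat_y))
  mat_y, mat_z = tf.matmul(mat_y, t), tf.matmul(t, mat_z)
mat_sqrt_a = mat_y * tf.sqrt(norm)
print(tf.reduce_max(tf.abs(tf.matmul(mat_sqrt_a, mat_sqrt_a) - mat_a)).numpy())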
Example #7
 def inverse_pth_root(self, input_t, exponent, epsilon=1e-12):
   input_t_f64 = tf.cast(input_t, tf.float64)
   s, u, v = tf.linalg.svd(
       input_t_f64 +
       tf.eye(tf.shape(input_t_f64)[0], dtype=tf.float64) * epsilon,
       full_matrices=True)
   val = tf.matmul(
       tf.matmul(
           u,
           tf.linalg.tensor_diag(
               tf.pow(tf.maximum(s, epsilon), tf.cast(exponent, tf.float64)))),
       tf.transpose(v))
   return tf.cast(val, tf.float32), tf.reduce_max(tf.abs(u - v))
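A hedged sanity check (self is unused by the body, so None is passed here; the SPD test matrix is illustrative): with exponent = -0.5 the routine approximates the inverse matrix square root.

import tensorflow as tf

a = tf.random.normal([4, 4])
spd = tf.matmul(a, a, transpose_b=True) + 4.0 * tf.eye(4)
inv_sqrt, _ = inverse_pth_root(None, spd, exponent=-0.5)
recovered = tf.matmul(tf.matmul(inv_sqrt, inv_sqrt), spd)
print(tf.reduce_max(tf.abs(recovered - tf.eye(4))).numpy())  # small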
Example #8
    def _compute_preconditioned_raw_grad(self, var, partitioned_grads):
        """Returns preconditioned gradient.

    Args:
      var: tf.Variable associated with the gradient.
      partitioned_grads: Partitioned gradient tensor.

    Returns:
      A preconditioned gradient tensor.
    """

        partitioned_preconditioned_grads = []
        num_partitions = len(partitioned_grads)
        for pt_idx, pt_grad in enumerate(partitioned_grads):
            pt_shape = pt_grad.get_shape()
            rank = len(pt_shape)
            preconditioner_exists_for_dim = (
                self._preconditioner_available_for_dims(pt_shape))
            preconditioner_indices = self._preconditioner_indices(pt_shape)
            mat_preconditioner_list = []
            for i in range(rank):
                if preconditioner_exists_for_dim[i]:
                    mat_preconditioner_list.append(
                        self.get_slot(
                            var,
                            self._preconditioner_key_for_partition_and_dim(
                                i, pt_idx, num_partitions)))
            precond_grad = pt_grad
            if rank == 2 and all(preconditioner_exists_for_dim):
                # Fast path for speedup.
                precond_grad = tf.matmul(
                    tf.matmul(mat_preconditioner_list[0], precond_grad),
                    mat_preconditioner_list[1])
            else:
                for i in range(rank):
                    if preconditioner_exists_for_dim[i]:
                        precond_grad = tf.tensordot(
                            precond_grad,
                            mat_preconditioner_list[preconditioner_indices[i]],
                            axes=([0], [0]))
                    else:
                        # If a preconditioner is not available for this dim, we
                        # transpose the gradient to rotate its axes for the next
                        # preconditioner.
                        precond_grad = tf.transpose(precond_grad,
                                                    perm=list(range(1, rank)) +
                                                    [0])
            partitioned_preconditioned_grads.append(precond_grad)
        return TensorPartitioner.reform_tensor(
            partitioned_preconditioned_grads,
            self._partitioner_metadata[var].num_splits_per_dim)
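For symmetric preconditioners the tensordot loop and the rank-2 fast path agree; a hedged standalone sketch of that equivalence (illustrative shapes):

import tensorflow as tf

g = tf.random.normal([3, 4])                            # gradient
l = tf.random.normal([3, 3]); l = l + tf.transpose(l)   # symmetric left
r = tf.random.normal([4, 4]); r = r + tf.transpose(r)   # symmetric right
loop = tf.tensordot(tf.tensordot(g, l, axes=([0], [0])), r, axes=([0], [0]))
fast = tf.matmul(tf.matmul(l, g), r)
print(tf.reduce_max(tf.abs(loop - fast)).numpy())  # ~0.0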
Example #9
def TargetTransforms(original_transforms, target_transform):
    """Compute merged transforms from original 4x4 transforms and target.

  The original_transforms contains the transformation from car to world
  coordinates.  We then want to transform from world coordinates to the pose of
  the target frame (using the inverse pose transformation for the selected
  frame).

  Note that the original_transforms must transform to the same coordinate
  system as target_transform.

  We compose these transformations into a sequence of transformation matrices
  that converts from car coordinates of each frame to the car coordinates of the
  target frame (using batched matmul for efficiency).

  Args:
    original_transforms: A [num_frames, 4, 4] tensor representing the original pose
      transform.
    target_transform: A [4, 4] tensor representing the target pose transform.

  Returns:
    A tensor of [num_frames, 4, 4] that can be applied to the original scenes
    to transform the points in those scenes to the target pose.
  """
    num_frames = py_utils.GetShape(original_transforms)[0]
    selected_pose_inv = tf.linalg.inv(target_transform)
    batched_pose_inv = tf.tile(selected_pose_inv[tf.newaxis, ...],
                               [num_frames, 1, 1])
    poses_in_target_frame = tf.matmul(batched_pose_inv, original_transforms)
    return poses_in_target_frame
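A hedged sanity check (requires Lingvo's py_utils): with an identity target_transform, the merged transforms equal the originals.

import tensorflow as tf

original = tf.eye(4, batch_shape=[3])
out = TargetTransforms(original, tf.eye(4))
print(tf.reduce_max(tf.abs(out - original)).numpy())  # 0.0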
Example #10
def FlatEmbeddingLookup(emb_table,
                        flat_ids,
                        vocab_size,
                        matmul_axis=1,
                        fprop_mode='matmul'):
    """Performs embedding lookup operation.

  Args:
    emb_table: tf.Tensor containing the embedding vectors.
    flat_ids: tf.Tensor of shape (number_ids,).
    vocab_size: vocabulary size of the embedding table, int.
    matmul_axis: the axis of flat_ids that is used for matmul, int.
    fprop_mode: embedding lookup option, should be 'matmul' or 'gather'.

  Returns:
    Embedding lookup result.
  """
    if fprop_mode == 'matmul':
        lhs = tf.equal(tf.expand_dims(flat_ids, matmul_axis),
                       tf.range(vocab_size, dtype=flat_ids.dtype))
        return tf.matmul(tf.cast(lhs, emb_table.dtype), emb_table)
    elif fprop_mode == 'gather':
        return tf.nn.embedding_lookup(emb_table, flat_ids)
    else:
        raise ValueError(
            'FlatEmbeddingLookup(): fprop_mode {} is not supported.'.format(
                fprop_mode))
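A hedged usage sketch (toy table and ids): both modes should agree for in-range ids.

import tensorflow as tf

emb_table = tf.random.normal([10, 4])        # vocab_size=10, emb_dim=4
flat_ids = tf.constant([3, 0, 7], dtype=tf.int32)

via_matmul = FlatEmbeddingLookup(emb_table, flat_ids, 10, fprop_mode='matmul')
via_gather = FlatEmbeddingLookup(emb_table, flat_ids, 10, fprop_mode='gather')
print(tf.reduce_max(tf.abs(via_matmul - via_gather)).numpy())  # 0.0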
Example #11
    def _MelSpectrogram(self, signal):
        """Computes the mel spectrogram from a waveform signal.

    Args:
      signal: f32 Tensor, shaped [batch_size, num_samples]

    Returns:
      f32 features Tensor, shaped [batch_size, num_frames, mel_channels]
    """
        p = self.params
        # FFT.
        real_frequency_spectrogram = tf.signal.rfft(signal, [self._fft_size])
        magnitude_spectrogram = tf.abs(real_frequency_spectrogram)

        # Shape of magnitude_spectrogram is [batch_size, num_frames, fft_size/2 + 1].
        # Mel_weight is [num_spectrogram_bins, num_mel_bins]
        mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(
            num_mel_bins=p.num_bins,
            num_spectrogram_bins=self._fft_size // 2 + 1,
            sample_rate=p.sample_rate,
            lower_edge_hertz=p.lower_edge_hertz,
            upper_edge_hertz=p.upper_edge_hertz,
            dtype=tf.float32)
        # Weight matrix implemented in the magnitude domain.
        batch_size, num_frames, fft_channels = py_utils.GetShape(
            magnitude_spectrogram, 3)
        mel_spectrogram = tf.matmul(
            tf.reshape(magnitude_spectrogram,
                       [batch_size * num_frames, fft_channels]),
            mel_weight_matrix)
        mel_spectrogram = tf.reshape(mel_spectrogram,
                                     [batch_size, num_frames, p.num_bins])

        return mel_spectrogram
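A hedged standalone sketch of the same pipeline on already-framed audio (the frame, FFT, and mel parameters here are illustrative, not the layer's params; tf.tensordot performs the same reshape-matmul-reshape as above):

import tensorflow as tf

frames = tf.random.normal([2, 100, 400])      # [batch, num_frames, samples]
spec = tf.abs(tf.signal.rfft(frames, [512]))  # [2, 100, 257]
mel_w = tf.signal.linear_to_mel_weight_matrix(
    num_mel_bins=80, num_spectrogram_bins=257, sample_rate=16000,
    lower_edge_hertz=125.0, upper_edge_hertz=7600.0)
mel = tf.tensordot(spec, mel_w, 1)            # [2, 100, 80]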
Example #12
    def __call__(self, context, inputs):
        p = self.params

        # context - [batch_size, context_size]
        # inputs - [batch_size, seq_len, input_size]

        # [batch_size, context_size] --> [batch_size, hidden_size]
        query = self.Wq(context)
        # [batch_size, seq_len, input_size] @ [input_size, hidden_size]
        # --> [batch_size, seq_len, hidden_size]
        keys = self.Wk(inputs)
        # [batch_size, seq_len, input_size] --> [batch_size, seq_len, hidden_size]
        values = self.Wv(inputs)
        # [batch_size, hidden_size] --> [batch_size, hidden_size, 1]
        query = tf.expand_dims(query, axis=2)
        # [batch_size, seq_len, hidden_size] @ [batch_size, hidden_size, 1]
        # --> [batch_size, seq_len, 1]
        logits = tf.matmul(keys, query)
        if p.scaled:
            # Scale the logits before the softmax so the attention weights
            # stay normalized.
            logits /= tf.sqrt(tf.cast(self.params.enc_units, tf.float32))
        attention_weights = tf.nn.softmax(logits, axis=1)

        context_vector = attention_weights * values
        context_vector = tf.reduce_sum(context_vector, axis=1)

        return context_vector, attention_weights
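A hedged standalone restatement (tf.keras Dense layers stand in for self.Wq / self.Wk / self.Wv; sizes are illustrative), with the scaling applied to the logits before the softmax as in the corrected code above:

import tensorflow as tf

hidden = 8
Wq = tf.keras.layers.Dense(hidden, use_bias=False)
Wk = tf.keras.layers.Dense(hidden, use_bias=False)
Wv = tf.keras.layers.Dense(hidden, use_bias=False)

context = tf.random.normal([2, 5])    # [batch_size, context_size]
inputs = tf.random.normal([2, 7, 4])  # [batch_size, seq_len, input_size]

query = tf.expand_dims(Wq(context), axis=2)    # [2, hidden, 1]
logits = tf.matmul(Wk(inputs), query)          # [2, 7, 1]
logits /= tf.sqrt(tf.cast(hidden, tf.float32))
weights = tf.nn.softmax(logits, axis=1)        # attention over seq_len
context_vector = tf.reduce_sum(weights * Wv(inputs), axis=1)  # [2, hidden]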
Example #13
def PointsToImagePlane(points, velo_to_image_plane):
    """Converts 3D points to the image plane.

  Args:
    points: A [N, 3] Floating point tensor containing xyz points. Points are
      assumed to be in velo coordinates.
    velo_to_image_plane: A [3, 4] matrix from velo xyz to image plane xy. After
      multiplication, you need to divide by last coordinate to recover 2D pixel
      locations.

  Returns:
    A [N, 2] Floating point tensor containing points in the image plane.
  """
    points = py_utils.HasRank(points, 2)
    num_points = tf.shape(points)[0]
    points = py_utils.HasShape(points, [num_points, 3])
    velo_to_image_plane = py_utils.HasShape(velo_to_image_plane, [3, 4])

    # Add homogeneous coordinates to points.
    points = tf.concat([points, tf.ones((num_points, 1))], axis=-1)

    # Perform projection and divide by last coordinate to recover 2D pixel
    # locations.
    points_image = tf.matmul(points, velo_to_image_plane, transpose_b=True)
    points_image = points_image[:, :2] / points_image[:, 2:3]

    points_image = py_utils.HasShape(points_image, [num_points, 2])
    return points_image
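A hedged usage sketch (requires Lingvo's py_utils; the [3, 4] matrix is a toy projection, not a real calibration): with identity intrinsics the result is simply (x / z, y / z).

import tensorflow as tf

velo_to_image_plane = tf.constant([[1., 0., 0., 0.],
                                   [0., 1., 0., 0.],
                                   [0., 0., 1., 0.]])
points = tf.constant([[2., 4., 2.]])
print(PointsToImagePlane(points, velo_to_image_plane).numpy())  # [[1., 2.]]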
Example #14
    def _True(anchor, bboxes):
        """True branch when num of bboxes is non-zero."""
        n = tf.shape(bboxes)[0]
        centroid = BBoxesCentroid(bboxes)

        # Compute dot products between the centroids and the anchor point.
        dot = tf.squeeze(tf.matmul(centroid, tf.expand_dims(anchor, 1)),
                         axis=1)

        # Normalize dot to get the cosine of the angles.
        norm = tf.norm(anchor) * tf.norm(centroid, axis=1)
        cosine = tf.where(tf.greater(norm, 0), dot / norm,
                          tf.zeros([n], norm.dtype))

        # Disambiguates whether the angle anchor--O--point is positive or
        # negative by the sign of cross products between anchor and points.
        # tf.linalg.cross takes 3-vectors (x, y, z), so we set z to 0.
        # tf.linalg.cross does not support broadcasting, so we tile anchor to
        # shape [n, 3].
        cross = tf.linalg.cross(
            tf.tile(tf.pad(tf.expand_dims(anchor, 0), [[0, 0], [0, 1]]),
                    [n, 1]), tf.pad(centroid, [[0, 0], [0, 1]]))

        # If the sign is positive, the points lie on the clockwise side of
        # O-->anchor. Hence, -1 - cosine moves the cosine values to [-2, 0].  If the
        # sign is negative, the points lie on the counter-clockwise side of
        # O-->anchor. 1 + cosine moves the cosine values to [0, 2].
        #
        # The car dataset shows that the points are scanned in a counter-clockwise
        # fashion. Therefore, top-k orders the points in the same order in which
        # bboxes appears in the spin.
        score = tf.where(tf.greater(cross, 0)[:, 2], -1 - cosine, 1 + cosine)

        _, indices = tf.nn.top_k(score, n, sorted=True)
        return indices
Example #15
 def _rotate(bbox, theta):
     rotation_matrix = tf.reshape(
         [tf.cos(theta), -tf.sin(theta),
          tf.sin(theta), tf.cos(theta)],
         shape=(2, 2))
     return tf.matmul(bbox, rotation_matrix)
Example #16
    def _MemoryNeigh(self, spellings, pronunciations, enc_out, theta,
                     batch_size):
        p = self.params
        # Take the last embedding from the encoder output as the query to the
        # neighbour lookup.
        # [batch_size, emb_size]
        # TODO(llion): Add projection?
        query = enc_out.encoded[-1, :, :]

        # Process the neighbours to get the keys
        spellings = tf.reshape(spellings, (batch_size * p.max_neighbors, -1))
        pronunciations = tf.reshape(pronunciations,
                                    (batch_size * p.max_neighbors, -1))

        spell_inp = py_utils.NestedMap({
            "ids":
            spellings,
            "paddings":
            self._GetPaddings(spellings, dtype=tf.int32),
        })

        pron_inp = py_utils.NestedMap({
            "ids":
            pronunciations,
            "paddings":
            self._GetPaddings(pronunciations, dtype=tf.int32),
        })

        spell_enc_out = self.spell_encoder.FProp(theta.spell_encoder,
                                                 spell_inp)
        pron_enc_out = self.spell_encoder.FProp(theta.pron_encoder, pron_inp)

        spell_enc = tf.reshape(
            spell_enc_out["encoded"],
            (p.max_spelling_len, batch_size, p.max_neighbors, p.enc_units))
        # [batch_size, max_neighbors, enc_units]
        spell_keys = spell_enc[-1, :, :, :]

        # TODO(llion): Output the neighbour directly?
        pron_entries = tf.reshape(pron_enc_out["encoded"],
                                  (p.max_pronunciation_len, batch_size,
                                   p.max_neighbors, p.enc_units))

        # Compute attention
        # [batch_size, max_neighbors, emb_size] @ [batch_size, emb_size, 1] -->
        # [batch_size, max_neighbors, 1]
        key_logits = tf.matmul(spell_keys, tf.expand_dims(query, axis=-1))
        key_prob = tf.nn.softmax(key_logits)

        # [batch_size, max_neighbors, max_pronunciation_len, enc_units]
        pron_entries = tf.transpose(pron_entries, (1, 2, 0, 3))

        weighted_pron = tf.expand_dims(key_prob, axis=-1) * pron_entries
        # --> [max_pronunciation_len, batch_size, enc_units]
        weighted_pron = tf.transpose(tf.reduce_sum(weighted_pron, axis=1),
                                     (1, 0, 2))
        padding = tf.zeros((p.max_pronunciation_len, batch_size))

        return [weighted_pron], [padding]
Example #17
 def _ExpandedSquaredDistanceMatrix(pa, pb):
   squared_pa = tf.reduce_sum(tf.square(pa), axis=2, keepdims=True)
   squared_pb = tf.transpose(
       tf.reduce_sum(tf.square(pb), axis=2, keepdims=True), perm=[0, 2, 1])
   # We have observed that entries may be < 0 when using the expanded version.
   # The max operation guards against that.
   return tf.maximum(
       squared_pa - 2 * tf.matmul(pa, pb, transpose_b=True) + squared_pb, 0.0)
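A hedged check against the direct pairwise computation (illustrative shapes):

import tensorflow as tf

pa = tf.random.normal([2, 5, 3])  # [batch, n, dims]
pb = tf.random.normal([2, 7, 3])  # [batch, m, dims]
fast = _ExpandedSquaredDistanceMatrix(pa, pb)
direct = tf.reduce_sum(
    tf.square(pa[:, :, None, :] - pb[:, None, :, :]), axis=-1)
print(tf.reduce_max(tf.abs(fast - direct)).numpy())  # ~0.0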
Example #18
 def MergeFn(xs):
     result = []
     for x in zip(*xs):
         val = x[0]
         for v in x[1:]:
             val = tf.matmul(val, v)
         result.append(val)
     return tuple(result)
Example #19
  def _PointConvParametricConv(self,
                               name,
                               mlp_dims,
                               num_in_channels,
                               num_out_channels):
    """Parametric convolution based on PointConv.

    Note that this layer assumes that the features have already been weighted
    by density. This layer follows Figure 5 in [1] but without the initial
    inverse density scale weighting part. When working with range images,
    one can use RIScaleFeaturesByDensity to scale the features beforehand.

    [1] PointConv: Deep Convolutional Networks on 3D Point Clouds. CVPR 2019.
        Wu, Wenxuan and Qi, Zhongang and Fuxin, Li.

    Args:
      name: string name for this layer.
      mlp_dims: dims for the MLP applied to the points. The first dimension of
        mlp_dims must be 3.
      num_in_channels: integer number of input channels (features).
      num_out_channels: integer number of output channels (features).

    Returns:
      Params for a parametric conv layer.
    """
    if mlp_dims[0] != 3:
      raise ValueError(
          'First dimension of mlp_dims must be 3. mlp_dims={}'.format(mlp_dims))

    def _CombineLastTwoDims(x):
      shape = py_utils.GetShape(x)
      return tf.reshape(x, shape[:-2] + [np.prod(shape[-2:])])

    return self._Seq(
        name,
        self._Par(
            'transpose_matmul',
            lambda x, y: tf.matmul(x, y, transpose_a=True),
            # Features [..., points, num_in_channels].
            self._GetValue('get_features', 'features'),
            self._Seq(
                'transform_points',
                # Map points into features to use FeaturesMLP which is padding
                # batchnorm aware.
                self._SeqToKey(
                    'points_as_features', 'features',
                    self._GetValue('get_points', 'points')),
                self._FeaturesMLP('points_mlp', mlp_dims),
                # Output of this should be [..., points, mlp_dims[-1]].
                self._GetValue('get_transformed_points', 'features'))),
        # Post transform_matmul, should be [..., num_in_channels, mlp_dims[-1]].
        # Note: The paper's use of conv is equivalent to reshaping so that the
        # last two dims are combined together.
        self._ApplyFn('reshape', fn=_CombineLastTwoDims),
        # TODO(jngiam): Consider handling batch norm carefully here, not all
        # center points have valid values.
        self._FC('fc', num_in_channels * mlp_dims[-1], num_out_channels))
Example #20
 def _generalized_inverse_pth_root(self, input_t, exponent, epsilon=1e-12):
     input_t_f64 = tf.cast(input_t, tf.float64)
     s, u, v = tf.linalg.svd(
         input_t_f64 +
         tf.eye(tf.shape(input_t_f64)[0], dtype=tf.float64) * epsilon,
         full_matrices=True)
     inv_s = tf.reshape(
         tf.pow(tf.maximum(s, epsilon), tf.cast(exponent, tf.float64)),
         [1, -1])
     val = tf.matmul(u * inv_s, v, adjoint_b=True)
     return tf.cast(val, tf.float32), tf.reduce_max(tf.abs(u - v))
Example #21
    def _MixWithProjectedInput(self, theta, state0, inputs):
        """Computes _Mix() with inputs already projected.

    Args:
      theta: a NestedMap of layer weights. Notably, it's expected to contain
        separate weight tensors for the input and hidden state projections, for
        performance reasons, under the keys 'wm_i' (input) and 'wm_h' (hidden).
      state0: A NestedMap with the same structure as return value of
        `self.zero_state()`.
      inputs: A Tensor of shape [batch, 4 * hidden_dim].

    Returns:
      A Tensor of the same shape as `inputs`.
    """
        proj_m = tf.matmul(state0.m, theta.wm_h)
        return inputs + proj_m
Example #22
def _BroadcastMatmul(x, y):
    """Broadcast y and matmul with x.

  Args:
    x: A tensor of shape [..., b].
    y: A matrix of shape [b, c].

  Returns:
    Tensor: ``z[..., c]``, where ``z[i..., :] = matmul(x[i..., :], y)``
  """
    y = py_utils.HasRank(y, 2)
    x_reshaped = tf.reshape(x, [-1, tf.shape(x)[-1]])
    result = tf.matmul(x_reshaped, y)
    return tf.reshape(
        result,
        tf.concat([tf.shape(x)[:-1], tf.shape(y)[-1:]], axis=0))
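A hedged equivalence check (requires Lingvo's py_utils for the rank check): for any leading shape the result matches an einsum over the last axis.

import tensorflow as tf

x = tf.random.normal([2, 5, 3])
y = tf.random.normal([3, 4])
out = _BroadcastMatmul(x, y)
ref = tf.einsum('...b,bc->...c', x, y)
print(tf.reduce_max(tf.abs(out - ref)).numpy())  # ~0.0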
Example #23
    def FProp(self, theta, x, y):
        """Computes pair-wise dot product similarity.

    Args:
      theta: NestedMap of variables belonging to this layer and its children.
      x: batch of encoded representations from modality x. A float32 Tensor of
        shape [x_batch_size, encoded_dim]
      y: batch of encoded representations from modality y. A float32 Tensor of
        shape [y_batch_size, encoded_dim]

    Returns:
      Pairwise dot products. A float32 Tensor with shape
      `[x_batch_size, y_batch_size]`.
    """

        return tf.matmul(x, y, transpose_b=True)
Example #24
    def GetMixResult(cls, theta, concat, lstmobj):  # pylint:disable=unused-argument
        """Compute the mix result.

    Args:
      theta: a theta object in the LSTM cells;
      concat: Tensor, concat of previous output and current state vector;
      lstmobj: a LSTM cell object.

    Returns:
      result Tensor.

    Raises:
      NotImplementedError if prune_option is not 'weight',
      'first_order_gradient', or 'second_order_gradient'.
    """
        return tf.matmul(
            concat,
            lstmobj.QWeight(tf.multiply(theta.wm, theta.mask,
                                        'masked_weight')))
Example #25
def TransformPoints(points, transforms):
    """Apply 4x4 transforms to a set of points.

  Args:
    points: A [..., num_points, 3] tensor of xyz point locations.
    transforms: A [..., 4, 4] tensor with the same leading shape as points.

  Returns:
    A tensor with the same shape as points, transformed respectively.
  """
    # Create homogeneous coordinates for points.
    points = tf.concat([points, tf.ones_like(points[..., :1])], axis=-1)

    # Apply the transformations, and divide by the last coordinate to project
    # back to 3D space.
    # Transpose the transforms since the transformation is usually expected to
    # be applied such that new_points = T * current_point.
    points = tf.matmul(points, transforms, transpose_b=True)
    points = points[..., :3] / points[..., 3:]

    return points
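A hedged usage sketch (a toy rigid transform: translate by (1, 2, 3) with identity rotation):

import tensorflow as tf

transform = tf.constant([[1., 0., 0., 1.],
                         [0., 1., 0., 2.],
                         [0., 0., 1., 3.],
                         [0., 0., 0., 1.]])
points = tf.zeros([1, 1, 3])  # one point at the origin
print(TransformPoints(points, transform[None, ...]).numpy())  # [[[1. 2. 3.]]]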
Example #26
def MatmulGather(source, indices):
    """Drop in replacement for tf.gather_nd() optimized for speed on TPU.

  TODO(weihan): tf.gather_nd() is supposed to be implemented in the same way
  on TPU. Investigate why it's much slower.

  Args:
    source: tensor of shape [N, P1, C]
    indices: tensor of shape [N, P2, K]

  Returns:
    tensor of shape [N, P2, K, C]
  """
    source = py_utils.HasRank(source, 3)
    n, p1, c = py_utils.GetShape(source)
    indices = py_utils.HasShape(indices, [n, -1, -1])
    _, p2, k = py_utils.GetShape(indices)

    onehot = tf.one_hot(indices, depth=p1)  # N x P2 x K x P1
    reshaped = tf.reshape(onehot, [n, -1, p1])  # N x (P2 x K) x P1
    target = tf.matmul(reshaped, source)  # N x (P2 x K) x C
    return tf.reshape(target, [n, p2, k, c])
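A hedged equivalence check against tf.gather with batch_dims=1 (requires Lingvo's py_utils for the shape checks inside MatmulGather):

import tensorflow as tf

source = tf.random.normal([2, 6, 3])                     # [N, P1, C]
indices = tf.random.uniform([2, 4, 5], 0, 6, tf.int32)   # [N, P2, K]
out = MatmulGather(source, indices)
ref = tf.gather(source, indices, batch_dims=1)
print(tf.reduce_max(tf.abs(out - ref)).numpy())  # ~0.0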
Example #27
  def _CreateCanvasAndTargets(self, batch):
    # pyformat: disable
    """Create the canvas and targets.

    Args:
      batch: A `.NestedMap`.

        - src: A `.NestedMap`.
          - ids: The source ids, ends in <eos>.
          - paddings: The source paddings.

        - tgt: A `.NestedMap`.
          - ids: The target ids, ends in <eos>.
          - paddings: The target paddings.

    Returns:
      A `NestedMap`.
        - canvas: The canvas (based off of the `rollin_policy`) of shape
          [batch_size, c_dim].
        - canvas_paddings: The paddings of `canvas`.
        - target_indices: The target indices (i.e., use these indices to
          tf.gather_nd the log-probs). Optional, only during training.
        - target_weights: The target weights. Optional, only during training.
    """
    # pyformat: enable
    p = self.params

    if not p.is_eval:
      # Sample our src and tgt canvas.
      src_descriptor = self._SampleCanvasAndTargets(batch.src.ids,
                                                    batch.src.paddings)
      tgt_descriptor = self._SampleCanvasAndTargets(batch.tgt.ids,
                                                    batch.tgt.paddings)

      # Offset the src ids (to unshare embeddings between src/tgt). Note, we
      # only offset the canvas ids, but we do not offset the vocab ids. This
      # will result in unshared embeddings, but shared softmax. This is due to
      # GPU/TPU memory limitations, empirically it is known that unsharing
      # everything results in better performance.
      vocab_size = p.decoder.softmax.num_classes
      src_descriptor.canvas = tf.where(
          tf.equal(src_descriptor.canvas_paddings, 0),
          src_descriptor.canvas + vocab_size, src_descriptor.canvas)

      # Offset the tgt indices (need shift according to src length).
      batch_size = py_utils.GetShape(batch.src.ids)[0]
      # `target_batch` is a [num_targets, batch_size] tensor where each row
      # identifies which batch the target belongs to. Note the observation that,
      # tf.reduce_sum(target_batch, 1) == 1 \forall rows.
      target_batch = tf.cast(
          tf.equal(
              tf.expand_dims(tf.range(batch_size), 0),
              tf.expand_dims(tgt_descriptor.target_indices[:, 0], 1)), tf.int32)
      src_lens = tf.cast(
          tf.reduce_sum(1 - src_descriptor.canvas_paddings, 1), tf.int32)
      # `tgt_offset` is shape [num_targets] where each entry corresponds to the
      # offset needed for that target (due to the source length).
      tgt_offset = tf.matmul(target_batch, tf.expand_dims(src_lens, 1))
      # We shift the tgt slot without touching the batch or vocab.
      tgt_descriptor.target_indices += tf.concat(
          [tf.zeros_like(tgt_offset), tgt_offset,
           tf.zeros_like(tgt_offset)], 1)

      # The canvas is simply the sequence-level concat of the src and tgt.
      canvas, canvas_paddings = insertion.SequenceConcat(
          src_descriptor.canvas, src_descriptor.canvas_paddings,
          tgt_descriptor.canvas, tgt_descriptor.canvas_paddings)
      target_indices = tf.concat(
          [src_descriptor.target_indices, tgt_descriptor.target_indices], 0)
      target_weights = tf.concat(
          [src_descriptor.target_weights, tgt_descriptor.target_weights], 0)

      return py_utils.NestedMap(
          canvas=canvas,
          canvas_paddings=canvas_paddings,
          target_indices=target_indices,
          target_weights=target_weights)
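The tgt_offset matmul above is a one-hot gather of src_lens by each target's batch id; a hedged standalone illustration with toy values:

import tensorflow as tf

src_lens = tf.constant([5, 2, 9])      # per-example source lengths
batch_ids = tf.constant([0, 0, 2, 1])  # target_indices[:, 0]
target_batch = tf.cast(
    tf.equal(tf.expand_dims(tf.range(3), 0),
             tf.expand_dims(batch_ids, 1)), tf.int32)
offset = tf.matmul(target_batch, tf.expand_dims(src_lens, 1))
print(offset.numpy().ravel())  # [5 5 9 2]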
Example #28
 def _unrolled_mat_pow_8(mat_m):
     """Computes mat_m^4."""
     mat_pow_4 = _unrolled_mat_pow_4(mat_m)
     return tf.matmul(mat_pow_4, mat_pow_4)
Example #29
 def _unrolled_mat_pow_2(mat_m):
     """Computes mat_m^2."""
     return tf.matmul(mat_m, mat_m)
Example #30
 def ParFn(*xs):
     result = xs[0]
     for v in xs[1:]:
         result = tf.matmul(result, v)
     return result