  def test_wavefrontify(self, decorator, b):
    l1, l2, s = 14, 37, 9
    minval_open, maxval_open = 10.5, 11.5
    minval_extend, maxval_extend = 0.8, 1.2

    sim_mat = random_sim_mat(b, l1=l1, l2=l2, emb_dim=3)
    gap_open = random_gap_penalty(minval_open, maxval_open, b, l1, l2)
    gap_extend = random_gap_penalty(minval_extend, maxval_extend, b, l1, l2)
    w = alignment.weights_from_sim_mat(sim_mat, gap_open, gap_extend)

    wavefrontify_fn = tf_ops.wavefrontify
    unwavefrontify_fn = tf_ops.unwavefrontify
    if decorator is not None:
      wavefrontify_fn = decorator(wavefrontify_fn)
      unwavefrontify_fn = decorator(unwavefrontify_fn)

    w_wavefrontified = wavefrontify_fn(w)
    w_unwavefrontified = unwavefrontify_fn(w_wavefrontified)

    self.assertEqual(w_wavefrontified.shape, (l1 + l2 - 1, s, l1, b))
    self.assertAllEqual(w_unwavefrontified, w)
    for n in tf.range(b):
      for a in tf.range(s):
        for i in tf.range(l1):
          for j in tf.range(l2):
            self.assertEqual(w_wavefrontified[i + j, a, i, n], w[n, i, j, a])
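
A minimal NumPy sketch of the anti-diagonal reindexing that the loops above verify; the name wavefrontify_reference is hypothetical, and padding outside valid anti-diagonals is assumed to be zero:

import numpy as np

def wavefrontify_reference(w):
  """Reorders w[n, i, j, a] into out[i + j, a, i, n] (wavefront layout)."""
  b, l1, l2, s = w.shape
  out = np.zeros((l1 + l2 - 1, s, l1, b), dtype=w.dtype)
  for n in range(b):
    for i in range(l1):
      for j in range(l2):
        out[i + j, :, i, n] = w[n, i, j, :]
  return out
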
Example no. 2
  def forward(sw_params):
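    # Relies on values from the enclosing scope: noise_sampler, num_samples,
    # sigma, stop_paths_gradient and hard_sw_affine.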
    # Perturbs the Smith-Waterman LP parameters.
    sim_mat, gap_open, gap_extend = sw_params[0], sw_params[1], sw_params[2]
    sw_params = alignment.weights_from_sim_mat(sim_mat, gap_open, gap_extend)
    shape, dtype = tf.shape(sw_params), sw_params.dtype
    pert_shape = tf.concat([[num_samples], shape], axis=0)
    noise, noise_grad = tf.nest.map_structure(
        lambda t: tf.cast(t, dtype), noise_sampler(pert_shape))
    pert_sw_params = tf.expand_dims(sw_params, 0) + sigma * noise
    pert_sw_params = tf.reshape(pert_sw_params,
                                tf.concat([[-1], shape[1:]], axis=0))
    # Computes optimal Smith-Waterman scores and alignments.
    scores, paths = hard_sw_affine(pert_sw_params)

    # Averages scores and paths over the random perturbations.
    def perturbation_reshape(t):
      new_shape = tf.concat([[num_samples], [-1], tf.shape(t)[1:]], 0)
      return tf.reshape(t, new_shape)

    scores = perturbation_reshape(scores)  # [num_samples, batch]
    paths = perturbation_reshape(paths)  # [num_samples, batch, len1, len2, 9]
    pert_scores = tf.reduce_mean(scores, axis=0)
    pert_paths = tf.reduce_mean(paths, axis=0)
    if stop_paths_gradient:
      pert_paths = tf.stop_gradient(pert_paths)

    def grad(ds, dp):
      # Computes grad of scores w.r.t. (packed) sw_params.
      grad_scores = pert_paths * tf.reshape(ds, [tf.shape(ds)[0], 1, 1, 1])
      # Computes grad of scores w.r.t. sim_mat, gap_open and gap_extend.
      grad_scores = alignment.adjoint_weights_from_sim_mat(
          grad_scores, gap_open.shape, gap_extend.shape)
      if stop_paths_gradient:
        gradients = grad_scores
      else:
        # Flattens paths to shape [num_samples, batch, len1 * len2 * 9].
        flat_paths = tf.reshape(
            paths, [num_samples, tf.shape(paths)[1], -1])
        flat_noise_grad = tf.reshape(
            noise_grad, [num_samples, tf.shape(noise_grad)[1], -1])
        # Flattens dp to shape [batch, len1 * len2 * 9].
        flat_dp = tf.reshape(dp, [tf.shape(dp)[0], -1])
        # Computes grad of paths w.r.t. (packed) sw_params.
        grad_paths = tf.einsum('nbd,nb->bd', flat_noise_grad,
                               tf.einsum('nbd,bd->nb', flat_paths, flat_dp))
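        # Monte Carlo estimate: averages over the num_samples perturbations
        # and scales by 1 / sigma.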
        grad_paths /= sigma * num_samples
        grad_paths = tf.reshape(grad_paths, shape)  # [batch, len1, len2, 9]
        # Computes grad of paths w.r.t. sim_mat, gap_open and gap_extend.
        grad_paths = alignment.adjoint_weights_from_sim_mat(
            grad_paths, gap_open.shape, gap_extend.shape)
        # Adds gradients w.r.t. scores and gradients w.r.t. paths.
        gradients = tf.nest.map_structure(
            lambda x, y: x + y, grad_scores, grad_paths)
      return gradients

    return (pert_scores, pert_paths), grad
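
The (pert_scores, pert_paths), grad return pair above follows TensorFlow's tf.custom_gradient contract: the wrapped function returns its outputs together with a function mapping upstream gradients to input gradients. A minimal, self-contained sketch of that contract (not the Smith-Waterman specifics):

import tensorflow as tf

@tf.custom_gradient
def square_with_manual_grad(x):
  y = tf.square(x)

  def grad(dy):
    # Maps the upstream gradient dy to dy * d(y)/d(x).
    return dy * 2.0 * x

  return y, grad

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = square_with_manual_grad(x)
print(tape.gradient(y, x))  # 6.0
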
  def test_smith_waterman_empty(self, decorator):
    smith_waterman_fn = tf_ops.hard_sw_affine
    if decorator is not None:
      smith_waterman_fn = decorator(smith_waterman_fn)
    tol = 1e-6

    single_sub = -5 * tf.ones((5, 5))
    toy_sub = tf.expand_dims(single_sub, 0)
    toy_gap_open = 0.03 * tf.ones((toy_sub.shape[0],))
    toy_gap_extend = 0.02 * tf.ones((toy_sub.shape[0],))
    w = alignment.weights_from_sim_mat(toy_sub, toy_gap_open, toy_gap_extend)

    values, paths = smith_waterman_fn(w, tol=tol)
    paths_squeeze = alignment.path_label_squeeze(paths)

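    # With all similarities at -5 and positive gap penalties, the optimal local
    # alignment is empty: the score is 0 and no path cells are selected.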
    self.assertAllClose(values, [0.0], atol=2 * tol)
    self.assertAllEqual(paths_squeeze, tf.zeros([1, 5, 5], tf.float32))
Example no. 4
  def setUp(self):
    super().setUp()

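    # Four toy 5x5 substitution matrices: the first scores +1 on the full
    # diagonal and -5 elsewhere; the others additionally set selected diagonal
    # entries to -5.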
    single_sub = 6 * tf.eye(5) - 5 * tf.ones((5, 5))
    second_sub = tf.tensor_scatter_nd_update(single_sub,
                                             indices=[[0, 0], [1, 1]],
                                             updates=[-5, -5])
    third_sub = tf.tensor_scatter_nd_update(single_sub,
                                            indices=[[0, 0], [2, 2]],
                                            updates=[-5, -5])
    fourth_sub = tf.tensor_scatter_nd_update(single_sub,
                                             indices=[[0, 0], [4, 4]],
                                             updates=[-5, -5])
    self._toy_sub = tf.stack([single_sub, second_sub, third_sub, fourth_sub])
    self._toy_gap_open = 0.03 * tf.ones((self._toy_sub.shape[0],))
    self._toy_gap_extend = 0.02 * tf.ones((self._toy_sub.shape[0],))
    self._weights = alignment.weights_from_sim_mat(self._toy_sub,
                                                   self._toy_gap_open,
                                                   self._toy_gap_extend)
  def test_smith_waterman_termination(self, decorator):
    smith_waterman_fn = tf_ops.hard_sw_affine
    if decorator is not None:
      smith_waterman_fn = decorator(smith_waterman_fn)
    tol = 1e-6

    single_sub = tf.concat([-5 * tf.ones((3, 1)),
                            6 * tf.eye(3) - 5 * tf.ones((3, 3))], 1)
    toy_sub = tf.expand_dims(single_sub, 0)
    toy_gap_open = 0.03 * tf.ones((toy_sub.shape[0],))
    toy_gap_extend = 0.02 * tf.ones((toy_sub.shape[0],))
    w = alignment.weights_from_sim_mat(toy_sub, toy_gap_open, toy_gap_extend)

    values, paths = smith_waterman_fn(w, tol=tol)
    paths_squeeze = alignment.path_label_squeeze(paths)

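    # The shifted diagonal of three +1 matches is the only positive signal, so
    # the optimal local alignment chains them for a total score of 3.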
    self.assertAllClose(values, [3.0], atol=2 * tol)
    paths_squeeze_test = tf.convert_to_tensor([[[0., 1., 0., 0.],
                                                [0., 0., 2., 0.],
                                                [0., 0., 0., 2.]]], tf.float32)
    self.assertAllEqual(paths_squeeze, paths_squeeze_test)
Example no. 6
def unperturbed_alignment_score(sim_mat,
                                gap_open,
                                gap_extend):
  """Noiseless alignment score and paths from Smith-Waterman parameters."""
  sw_params = alignment.weights_from_sim_mat(sim_mat, gap_open, gap_extend)
  return hard_sw_affine(sw_params)
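
A minimal usage sketch for the function above, assuming hard_sw_affine returns (scores, paths) as in the earlier snippets; the toy inputs mirror the test fixtures:

sim_mat = 6 * tf.eye(5)[tf.newaxis] - 5 * tf.ones((1, 5, 5))  # [batch=1, 5, 5]
gap_open = 0.03 * tf.ones((1,))
gap_extend = 0.02 * tf.ones((1,))
scores, paths = unperturbed_alignment_score(sim_mat, gap_open, gap_extend)
# scores: [1] optimal local alignment score; paths: [1, 5, 5, 9] path tensor
# over the 9 alignment states.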