Example #1
    def forward(self, pred, target):
        '''
        Input boxes are in cx, cy, w, h format.
        '''
        assert pred.shape[0] == target.shape[0]

        pred = L.reshape(pred, [-1, 4])
        target = L.reshape(target, [-1, 4])

        pred = L.cast(pred, 'float32')
        target = L.cast(target, 'float32')

        # Top-left corner of the intersection rectangle
        tl = L.elementwise_max((pred[:, :2] - pred[:, 2:] / 2),
                               (target[:, :2] - target[:, 2:] / 2))
        # Bottom-right corner of the intersection rectangle
        br = L.elementwise_min((pred[:, :2] + pred[:, 2:] / 2),
                               (target[:, :2] + target[:, 2:] / 2))

        area_p = paddle.prod(pred[:, 2:], 1)  # area of the predicted boxes
        area_g = paddle.prod(target[:, 2:], 1)  # area of the ground-truth boxes

        # Does the intersection rectangle exist?
        # en = (tl < br).type(tl.type()).prod(dim=1)
        en = L.cast(tl < br, 'float32')
        en = paddle.prod(en, 1)  # does the intersection rectangle exist?

        area_i = paddle.prod(br - tl, 1) * en
        area_u = area_p + area_g - area_i
        iou = (area_i) / (area_u + 1e-16)

        if self.loss_type == "iou":
            loss = 1 - iou**2
        elif self.loss_type == "giou":
            c_tl = L.elementwise_min((pred[:, :2] - pred[:, 2:] / 2),
                                     (target[:, :2] - target[:, 2:] / 2))
            c_br = L.elementwise_max((pred[:, :2] + pred[:, 2:] / 2),
                                     (target[:, :2] + target[:, 2:] / 2))
            area_c = paddle.prod(c_br - c_tl, 1)

            # Clip area_c to the interval [1e-16, np.inf]
            area_c = L.clip(area_c, 1e-16, np.inf)
            giou = iou - (area_c - area_u) / area_c
            # Clip giou to the interval [-1.0, 1.0]
            giou = L.clip(giou, -1.0, 1.0)
            loss = 1 - giou
        if self.reduction == "mean":
            loss = loss.mean()
        elif self.reduction == "sum":
            loss = loss.sum()

        return loss
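
The loss above is written against the legacy fluid layers API (the `L` alias). As a hedged sketch, assuming paddle 2.x, the same IoU core can be expressed with `paddle.maximum`/`paddle.minimum`; the box format, the `en` overlap gate, and the `1e-16` stabilizer follow the code above, while the class wrapper is omitted:

import paddle

def iou_cxcywh(pred, target):
    # Both tensors have shape [N, 4] in cx, cy, w, h format.
    tl = paddle.maximum(pred[:, :2] - pred[:, 2:] / 2,
                        target[:, :2] - target[:, 2:] / 2)
    br = paddle.minimum(pred[:, :2] + pred[:, 2:] / 2,
                        target[:, :2] + target[:, 2:] / 2)
    area_p = paddle.prod(pred[:, 2:], axis=1)
    area_g = paddle.prod(target[:, 2:], axis=1)
    # Gate the intersection area to zero when the boxes do not overlap.
    en = paddle.prod(paddle.cast(tl < br, 'float32'), axis=1)
    area_i = paddle.prod(br - tl, axis=1) * en
    return area_i / (area_p + area_g - area_i + 1e-16)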
Example #2
    def get_loss(self, loss_inputs):
        # cls loss
        score_tgt = paddle.cast(x=loss_inputs['rpn_score_target'],
                                dtype='float32')
        score_tgt.stop_gradient = True
        loss_rpn_cls = ops.sigmoid_cross_entropy_with_logits(
            input=loss_inputs['rpn_score_pred'], label=score_tgt)
        loss_rpn_cls = paddle.mean(loss_rpn_cls, name='loss_rpn_cls')

        # reg loss
        loc_tgt = paddle.cast(x=loss_inputs['rpn_rois_target'],
                              dtype='float32')
        loc_tgt.stop_gradient = True
        loss_rpn_reg = ops.smooth_l1(
            input=loss_inputs['rpn_rois_pred'],
            label=loc_tgt,
            inside_weight=loss_inputs['rpn_rois_weight'],
            outside_weight=loss_inputs['rpn_rois_weight'],
            sigma=3.0,
        )
        loss_rpn_reg = paddle.sum(loss_rpn_reg)
        score_shape = paddle.shape(score_tgt)
        score_shape = paddle.cast(score_shape, dtype='float32')
        norm = paddle.prod(score_shape)
        norm.stop_gradient = True
        loss_rpn_reg = loss_rpn_reg / norm

        return {'loss_rpn_cls': loss_rpn_cls, 'loss_rpn_reg': loss_rpn_reg}
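
`paddle.prod(score_shape)` above collapses the shape vector of `score_tgt` into its element count, so the summed smooth-L1 regression loss is averaged over every sampled anchor. A minimal illustration (the batch and anchor sizes are made up):

import paddle

score_tgt = paddle.zeros([2, 12000])  # 2 images, 12000 anchors each
score_shape = paddle.cast(paddle.shape(score_tgt), 'float32')
norm = paddle.prod(score_shape)
print(norm.numpy())  # 24000.0, the divisor for the reg loss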
Example #3
    def _compute_example_conditioned_expert_weights(self, routing_inputs):
        """Computes the example-conditioned weights for the experts.
        Args:
            routing_inputs: a tensor of shape=(batch_size, num_features) containing
            the input examples.
        Returns:
            A tuple: (expert_weights, selector_outputs).
            expert_weights is a tensor with shape=(batch_size, num_experts),
            containing the expert weights for each example in routing_inputs.
            selector_outputs is a tensor with
            shape=(batch_size, num_nonzero, num_experts), which contains the outputs
            of the single-expert selectors for all the examples in routing_inputs.
        """
        sample_logits = paddle.reshape(
            self._z_logits(routing_inputs),
            [-1, self._num_nonzeros, 1, self._num_binary])
        smooth_step_activations = self._smooth_step(sample_logits)

        # Shape = (batch_size, num_nonzeros, num_experts).
        selector_outputs = paddle.prod(paddle.where(
            paddle.unsqueeze(self._binary_codes, 0), smooth_step_activations,
            1 - smooth_step_activations),
                                       axis=3)

        # Weights for the single-expert selectors
        # Shape = (batch_size, num_nonzeros, 1)
        selector_weights = paddle.unsqueeze(self._w_logits(routing_inputs), 2)
        selector_weights = F.softmax(selector_weights, axis=1)

        # Sum over the single-expert selectors. Shape = (batch_size, num_experts).
        expert_weights = paddle.sum(selector_weights * selector_outputs,
                                    axis=1)

        return expert_weights, selector_outputs
Example #4
    def __call__(self, x, index):
        if self.dim < 0:
            self.dim += len(x.shape)
        x_range = list(range(len(x.shape)))
        x_range[0] = self.dim
        x_range[self.dim] = 0
        x_swaped = paddle.transpose(x, perm=x_range)
        index_range = list(range(len(index.shape)))
        index_range[0] = self.dim
        index_range[self.dim] = 0
        index_swaped = paddle.transpose(index, perm=index_range)
        dtype = index.dtype

        x_shape = paddle.shape(x_swaped)
        index_shape = paddle.shape(index_swaped)

        prod = paddle.cast(paddle.prod(x_shape), dtype=dtype) / x_shape[0]

        x_swaped_flattend = paddle.flatten(x_swaped)
        index_swaped_flattend = paddle.flatten(index_swaped)
        index_swaped_flattend *= prod

        bias = paddle.arange(start=0, end=prod, dtype=dtype)
        bias = paddle.reshape(bias, x_shape[1:])
        bias = paddle.crop(bias, index_shape[1:])
        bias = paddle.flatten(bias)
        bias = paddle.tile(bias, [index_shape[0]])
        index_swaped_flattend += bias

        gathered = paddle.index_select(x_swaped_flattend, index_swaped_flattend)
        gathered = paddle.reshape(gathered, index_swaped.shape)

        out = paddle.transpose(gathered, perm=x_range)

        return out
Example #5
    def get_feature_by_coordinate(self, x, coord, offset_h, offset_w,
                                  padded_x_w):
        x = paddle.reshape(x, [0, 0, -1])
        index = paddle.cast(
            coord[:, :, :, :self.N] * padded_x_w,
            dtype='int64') + coord[:, :, :, self.N:]  # offset_x*w + offset_y
        index = paddle.unsqueeze(index, 1)
        index = paddle.tile(index, [1, self.in_channel, 1, 1, 1])
        index = paddle.reshape(index, (0, 0, -1))
        x_range = list(range(3))
        dim = 2
        x_range[0] = dim
        x_range[dim] = 0
        x_swaped = paddle.transpose(x, perm=x_range)
        index_range = list(range(3))
        index_range[0] = dim
        index_range[dim] = 0
        index_swaped = paddle.transpose(index, perm=index_range)
        x_shape = layers.shape(x_swaped)
        index_shape = layers.shape(index_swaped)
        prod = paddle.prod(x_shape[1:], keepdim=True)
        x_swaped_flattend = paddle.reshape(x_swaped, [-1])
        index_swaped_flattend = paddle.reshape(index_swaped, [-1])
        index_swaped_flattend *= prod
        # Use an integer dtype so the bias can be added to the int64 indices.
        bias = paddle.arange(start=0, end=prod, step=1, dtype='int64')
        bias = paddle.tile(bias, index_shape[0])
        index_swaped_flattend += bias
        gathered = paddle.gather(x_swaped_flattend, index_swaped_flattend)
        gathered = paddle.reshape(gathered, layers.shape(index_swaped))
        x_offset = paddle.transpose(gathered, perm=x_range)
        x_offset = paddle.reshape(
            x_offset, (-1, self.in_channel, offset_h, offset_w, self.N))
        return x_offset
Example #6
    def run_imperative(self):
        input = paddle.to_tensor(self.input)
        dy_result = paddle.prod(input)
        expected_result = np.prod(self.input)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=1)
        expected_result = np.prod(self.input, axis=1)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=-1)
        expected_result = np.prod(self.input, axis=-1)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=[0, 1])
        expected_result = np.prod(self.input, axis=(0, 1))
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=1, keepdim=True)
        expected_result = np.prod(self.input, axis=1, keepdims=True)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=1, dtype='int64')
        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))

        dy_result = paddle.prod(input, axis=1, keepdim=True, dtype='int64')
        expected_result = np.prod(self.input,
                                  axis=1,
                                  keepdims=True,
                                  dtype=np.int64)
        self.assertTrue(np.allclose(dy_result.numpy(), expected_result))
Example #7
    def run_static(self, use_gpu=False):
        input = paddle.static.data(name='input',
                                   shape=[10, 10, 5],
                                   dtype='float32')
        result0 = paddle.prod(input)
        result1 = paddle.prod(input, axis=1)
        result2 = paddle.prod(input, axis=-1)
        result3 = paddle.prod(input, axis=[0, 1])
        result4 = paddle.prod(input, axis=1, keepdim=True)
        result5 = paddle.prod(input, axis=1, dtype='int64')
        result6 = paddle.prod(input, axis=1, keepdim=True, dtype='int64')

        place = paddle.CUDAPlace(0) if use_gpu else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        exe.run(paddle.static.default_startup_program())
        static_result = exe.run(feed={"input": self.input},
                                fetch_list=[
                                    result0, result1, result2, result3, result4,
                                    result5, result6
                                ])

        expected_result = np.prod(self.input)
        self.assertTrue(np.allclose(static_result[0], expected_result))
        expected_result = np.prod(self.input, axis=1)
        self.assertTrue(np.allclose(static_result[1], expected_result))
        expected_result = np.prod(self.input, axis=-1)
        self.assertTrue(np.allclose(static_result[2], expected_result))
        expected_result = np.prod(self.input, axis=(0, 1))
        self.assertTrue(np.allclose(static_result[3], expected_result))
        expected_result = np.prod(self.input, axis=1, keepdims=True)
        self.assertTrue(np.allclose(static_result[4], expected_result))
        expected_result = np.prod(self.input, axis=1, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[5], expected_result))
        expected_result = np.prod(
            self.input, axis=1, keepdims=True, dtype=np.int64)
        self.assertTrue(np.allclose(static_result[6], expected_result))
Example #8
File: __init__.py  Project: HighCWu/ddim
    def cumprod(x, axis=None):
        if axis is None:
            x = x.reshape([-1])
            axis = 0
        assert isinstance(axis, int)

        if axis < 0:
            axis = len(x.shape) + axis
        axis_length = x.shape[axis]
        mask = cumprod_mask(axis_length).reshape([
            *list([1] * axis), -1, axis_length,
            *list([1] * (len(x.shape) - axis - 1))
        ])
        x = x.unsqueeze(axis)
        x = x * mask.detach() + (paddle.ones_like(mask) * (1 - mask)).detach()

        return paddle.prod(x, axis=axis + 1)
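
`cumprod_mask` is a helper defined elsewhere in the project. From how it is consumed above (reshaped into an `axis_length x axis_length` block so that row `i` keeps `x[j]` only for `j <= i`, with ones padding the rest before the product), a plausible reconstruction is a lower-triangular matrix of ones; this is an assumption, not the project's actual code. Recent paddle releases also ship the operation directly as `paddle.cumprod`:

import paddle

def cumprod_mask(axis_length):
    # Hypothetical reconstruction: mask[i, j] = 1 for j <= i, else 0,
    # so the product over the last axis yields prod(x[0], ..., x[i]).
    return paddle.tril(paddle.ones([axis_length, axis_length]))

# In paddle >= 2.1 the same result is a builtin: paddle.cumprod(x, dim=axis)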
Example #9
def gather_op(x, dim, index):

    dtype_mapping = {
        "VarType.INT32": "int32",
        "VarType.INT64": "int64",
        "paddle.int32": "int32",
        "paddle.int64": "int64"
    }
    if dim < 0:
        dim += len(x.shape)

    x_range = list(range(len(x.shape)))
    x_range[0] = dim
    x_range[dim] = 0
    x_swaped = paddle.transpose(x, perm=x_range)

    index_range = list(range(len(index.shape)))
    index_range[0] = dim
    index_range[dim] = 0
    index_swaped = paddle.transpose(index, perm=index_range)

    dtype = dtype_mapping[str(index.dtype)]
    x_shape = paddle.shape(x_swaped)
    index_shape = paddle.shape(index_swaped)
    prod = paddle.prod(x_shape, dtype=dtype) / x_shape[0]

    x_swaped_flattend = paddle.flatten(x_swaped)
    index_swaped_flattend = paddle.flatten(index_swaped)
    index_swaped_flattend *= prod

    bias = paddle.arange(start=0, end=prod, dtype=dtype)
    bias = paddle.reshape(bias, x_shape[1:])
    bias = paddle.crop(bias, index_shape[1:])
    bias = paddle.flatten(bias)
    bias = paddle.tile(bias, [index_shape[0]])

    index_swaped_flattend += bias

    gathered = paddle.index_select(x_swaped_flattend, index_swaped_flattend)
    gathered = paddle.reshape(gathered, index_swaped.shape)

    out = paddle.transpose(gathered, perm=x_range)

    return out
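
Examples #4 and #9 emulate a `torch.gather`-style lookup by transposing, flattening, biasing the indices, and calling `paddle.index_select`. As a hedged usage note, assuming paddle >= 2.3, `paddle.take_along_axis` provides the same semantics directly:

import paddle

x = paddle.arange(12, dtype='float32').reshape([3, 4])
index = paddle.to_tensor([[0, 1], [2, 3], [1, 0]])

# Builtin equivalent of gather_op(x, dim=1, index): out[i, j] = x[i, index[i, j]].
out = paddle.take_along_axis(x, index, axis=1)
print(out.numpy())  # [[0. 1.] [6. 7.] [5. 4.]]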
Example #10
    def _compute_expert_weights(self):
        """Computes the weight vector for the experts.
        Args: None.
        Returns:
          A tuple: (expert_weights, selector_outputs).
            expert_weights is the final weight vector of the experts.
            selector_outputs is a (num_nonzero, num_experts)-matrix whose i-th row
            represents the outputs of the i-th single-expert selector.
        """
        # Shape = (num_nonzero, 1, num_binary)
        smooth_step_activations = self._smooth_step(self._z_logits)

        # Shape = (num_nonzero, num_experts)
        selector_outputs = paddle.prod(paddle.where(
            self._binary_codes, smooth_step_activations,
            1 - smooth_step_activations),
                                       axis=2)

        # Weights for the single-expert selectors: shape = (num_nonzero, 1)
        selector_weights = F.softmax(self._w_logits, axis=0)
        expert_weights = paddle.sum(selector_weights * selector_outputs,
                                    axis=0)

        return expert_weights, selector_outputs
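
The `paddle.prod(paddle.where(binary_codes, act, 1 - act), axis=...)` pattern used in examples #3 and #10 turns per-bit smooth-step activations into a distribution over experts: the expert whose binary code matches the rounded activations receives a product close to 1. A minimal standalone sketch with illustrative shapes and values (not the class's actual buffers):

import paddle

num_experts, num_binary = 4, 2  # experts indexed by 2-bit codes
# binary_codes[k, b] is bit b of expert k's code: 00, 01, 10, 11.
binary_codes = paddle.to_tensor(
    [[False, False], [False, True], [True, False], [True, True]])
# Per-bit activations near 1 select bit=1, near 0 select bit=0;
# [0.9, 0.1] is close to code "10", i.e. expert 2.
activations = paddle.to_tensor([[0.9, 0.1]] * num_experts)

per_bit = paddle.where(binary_codes, activations, 1 - activations)
selector = paddle.prod(per_bit, axis=1)
print(selector.numpy())  # [0.09 0.01 0.81 0.09], peaked at expert 2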
Example #11
def reduce_prod(name: str, x, axis=None, keepdim=False):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
        out = paddle.prod(data_x, axis=axis, keepdim=keepdim)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])

        # startup program will call initializer to initialize the parameters.
        exe.run(paddle.static.default_startup_program())
        outs = exe.run(feed={'x': x}, fetch_list=[out])
        saveModel(name,
                  exe,
                  feedkeys=['x'],
                  fetchlist=[out],
                  inputs=[x],
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
Example #12
def do_eval(args):
    paddle.set_device(args.device)
    model_class, tokenizer_class = MODEL_CLASSES["gpt"]
    tokenizer = tokenizer_class.from_pretrained(args.model_name)

    if args.init_checkpoint_path is not None:
        model = GPTForPretraining(
            GPTModel(
                **model_class.pretrained_init_configuration[args.model_name]))

        logger.info("Load model checkpoint from %s" %
                    args.init_checkpoint_path)
        model_dict = paddle.load(os.path.join(args.init_checkpoint_path))
        model.set_dict(model_dict)
    else:
        model = model_class.from_pretrained(args.model_name)

    tic_eval = time.time()
    eval_data_loader = create_eval_dataset(args)
    model.eval()
    total_score = 0
    score_name = "loss" if not args.cloze_eval else "number correct"
    with paddle.no_grad():
        for step, batch in enumerate(eval_data_loader):
            tokens, loss_mask, attention_mask, position_ids, labels = batch
            preds = model(tokens, position_ids, attention_mask)
            if not args.cloze_eval:
                masked_lm_loss = paddle.nn.functional.cross_entropy(
                    preds, labels, reduction="none")
                loss = paddle.sum(masked_lm_loss * loss_mask)
                total_score += loss.numpy() / (args.num_tokenized_tokens - 1)
            else:
                outputs = paddle.argmax(preds, -1)
                acc = paddle.cast(outputs == labels, 'float32')
                acc = paddle.where(paddle.cast(loss_mask, 'bool'), acc,
                                   paddle.ones_like(acc))
                acc = paddle.sum(paddle.prod(acc, -1))
                total_score += acc.numpy()
            if step % args.logging_steps == 0:
                logger.info(
                    "step %d, batch: %d, %s: %f, speed: %.2f step/s" %
                    (step, step, score_name, total_score, args.logging_steps /
                     (time.time() - tic_eval)))
                tic_eval = time.time()

    if not args.cloze_eval:
        total_loss = float(total_score)
        ppl = math.exp(min(20, total_loss))
        token_ratio = (args.num_tokenized_tokens -
                       1) / (args.num_original_tokens - 1)
        adjusted_ppl = math.exp(min(20, total_loss * token_ratio))
        string = ' validation results on {} | '.format(args.eval_path)
        string += 'avg loss: {:.4E} | '.format(total_loss)
        string += 'ppl: {:.4E} | '.format(ppl)
        string += 'adjusted ppl: {:.4E} | '.format(adjusted_ppl)
        string += 'token ratio: {} |'.format(token_ratio)
    else:
        num_correct = float(total_score)
        acc = float(num_correct / args.num_examples)
        string = ' validation results on {} | '.format(args.eval_path)
        string += 'number correct: {:.4E} | '.format(num_correct)
        string += 'total examples: {:.4E} | '.format(args.num_examples)
        string += 'avg accuracy: {:.4E}'.format(acc)
    logger.info(string)
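
In the cloze branch above, `paddle.prod(acc, -1)` acts as a per-example logical AND: an example scores 1 only when every scored token matches, since masked positions were pre-filled with 1.0 through `paddle.where`. A small illustration:

import paddle

# Per-token correctness for two examples, masked positions already set to 1.
acc = paddle.to_tensor([[1., 1., 1., 1.],   # every token correct
                        [1., 0., 1., 1.]])  # one token wrong
per_example = paddle.prod(acc, -1)
print(per_example.numpy())              # [1. 0.]
print(paddle.sum(per_example).numpy())  # 1.0 -> one correct example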
Example #13
def taylor(M: int,
           nbar=4,
           sll=30,
           norm=True,
           sym: bool = True,
           dtype: str = 'float64') -> Tensor:
    """Compute a Taylor window.
    The Taylor window taper function approximates the Dolph-Chebyshev window's
    constant sidelobe level for a parameterized number of near-in sidelobes.
    Parameters:
        M(int): window size
        nbar, sll, norm: the window-specific parameters.
        sym(bool):whether to return symmetric window.
            The default value is True
        dtype(str): the datatype of returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.taylor().
    """
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)
    # Original text uses a negative sidelobe level parameter and then negates
    # it in the calculation of B. To keep consistent with other methods we
    # assume the sidelobe level parameter to be positive.
    B = 10**(sll / 20)
    A = _acosh(B) / math.pi
    s2 = nbar**2 / (A**2 + (nbar - 0.5)**2)
    ma = paddle.arange(1, nbar, dtype=dtype)

    Fm = paddle.empty((nbar - 1, ), dtype=dtype)
    signs = paddle.empty_like(ma)
    signs[::2] = 1
    signs[1::2] = -1
    m2 = ma * ma
    for mi in range(len(ma)):
        numer = signs[mi] * paddle.prod(1 - m2[mi] / s2 / (A**2 +
                                                           (ma - 0.5)**2))
        if mi == 0:
            denom = 2 * paddle.prod(1 - m2[mi] / m2[mi + 1:])
        elif mi == len(ma) - 1:
            denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi])
        else:
            denom = 2 * paddle.prod(1 - m2[mi] / m2[:mi]) * paddle.prod(
                1 - m2[mi] / m2[mi + 1:])

        Fm[mi] = numer / denom

    def W(n):
        return 1 + 2 * paddle.matmul(
            Fm.unsqueeze(0),
            paddle.cos(2 * math.pi * ma.unsqueeze(1) * (n - M / 2. + 0.5) / M))

    w = W(paddle.arange(0, M, dtype=dtype))

    # normalize (Note that this is not described in the original text [1])
    if norm:
        scale = 1.0 / W((M - 1) / 2)
        w *= scale
    w = w.squeeze()
    return _truncate(w, needs_trunc)
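
Since the docstring states the function is consistent with `scipy.signal.windows.taylor()`, a quick sanity check of the port (assuming scipy is installed) is to compare the two directly:

import numpy as np
from scipy.signal.windows import taylor as scipy_taylor

w = taylor(51, nbar=4, sll=30, norm=True, sym=True)
ref = scipy_taylor(51, nbar=4, sll=30, norm=True, sym=True)
print(np.allclose(w.numpy(), ref, atol=1e-6))  # expected: True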
Example #14
def atom37_to_torsion_angles(
    aatype: paddle.Tensor,  # (B, T, N)
    all_atom_pos: paddle.Tensor,  # (B, T, N, 37, 3)
    all_atom_mask: paddle.Tensor,  # (B, T, N, 37)
    placeholder_for_undefined=False,
) -> Dict[str, paddle.Tensor]:
    """Computes the 7 torsion angles (in sin, cos encoding) for each residue.

    The 7 torsion angles are in the order
    '[pre_omega, phi, psi, chi_1, chi_2, chi_3, chi_4]',
    here pre_omega denotes the omega torsion angle between the given amino acid
    and the previous amino acid.

    Args:
        aatype: Amino acid type, given as array with integers.
        all_atom_pos: atom37 representation of all atom coordinates.
        all_atom_mask: atom37 representation of mask on all atom coordinates.
        placeholder_for_undefined: flag denoting whether to set masked torsion
        angles to zero.
    Returns:
        Dict containing:
        * 'torsion_angles_sin_cos': Array with shape (B, N, 7, 2) where the final
            2 dimensions denote sin and cos respectively
        * 'alt_torsion_angles_sin_cos': same as 'torsion_angles_sin_cos', but
            with the angle shifted by pi for all chi angles affected by the naming
            ambiguities.
        * 'torsion_angles_mask': Mask for which chi angles are present.
    """

    # Map aatype > 20 to 'Unknown' (20).
    aatype = paddle.minimum(aatype, paddle.to_tensor([20], dtype='int32'))

    num_batch, num_temp, num_res = aatype.shape

    # Compute the backbone angles.
    pad = paddle.zeros([num_batch, num_temp, 1, 37, 3])
    prev_all_atom_pos = paddle.concat([pad, all_atom_pos[..., :-1, :, :]], axis=-3)

    pad = paddle.zeros([num_batch, num_temp, 1, 37])
    prev_all_atom_mask = paddle.concat([pad, all_atom_mask[..., :-1, :]], axis=-2)

    # For each torsion angle collect the 4 atom positions that define this angle.
    # shape (B, T, N, atoms=4, xyz=3)
    pre_omega_atom_pos = paddle.concat(
        [prev_all_atom_pos[..., 1:3, :],  # prev CA, C
        all_atom_pos[..., 0:2, :]  # this N, CA
        ], axis=-2)

    phi_atom_pos = paddle.concat(
        [prev_all_atom_pos[..., 2:3, :],  # prev C
        all_atom_pos[..., 0:3, :]  # this N, CA, C
        ], axis=-2)

    psi_atom_pos = paddle.concat(
        [all_atom_pos[..., 0:3, :],  # this N, CA, C
        all_atom_pos[..., 4:5, :]  # this O
        ], axis=-2)

    # Collect the masks from these atoms.
    # Shape [batch, n_temp, num_res]
    pre_omega_mask = (
        paddle.prod(prev_all_atom_mask[..., 1:3], axis=-1)  # prev CA, C
        * paddle.prod(all_atom_mask[..., 0:2], axis=-1))  # this N, CA
    phi_mask = (
        prev_all_atom_mask[..., 2]  # prev C
        * paddle.prod(all_atom_mask[..., 0:3], axis=-1))  # this N, CA, C
    psi_mask = (
        paddle.prod(all_atom_mask[..., 0:3], axis=-1) *  # this N, CA, C
        all_atom_mask[..., 4])  # this O

    # Collect the atoms for the chi-angles.
    # Compute the table of chi angle indices. Shape: [restypes, chis=4, atoms=4].
    chi_atom_indices = get_chi_atom_indices()

    # Select atoms to compute chis. Shape: [batch, num_temp, num_res, chis=4, atoms=4].
    atom_indices = utils.batched_gather(
        params=chi_atom_indices, indices=aatype, axis=0, batch_dims=0)

    # Gather atom positions. Shape: [batch, num_temp, num_res, chis=4, atoms=4, xyz=3].
    chis_atom_pos = utils.batched_gather(
        params=all_atom_pos, indices=atom_indices, axis=0,
        batch_dims=3)

    # Copy the chi angle mask, add the UNKNOWN residue. Shape: [restypes, 4].
    chi_angles_mask = list(residue_constants.chi_angles_mask)
    chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
    chi_angles_mask = paddle.to_tensor(chi_angles_mask)

    # Compute the chi angle mask, i.e. which chi angles exist according to the
    # aatype. Shape [batch, num_temp, num_res, chis=4].
    chis_mask = utils.batched_gather(params=chi_angles_mask, indices=aatype,
                                axis=0, batch_dims=0)
    # Constrain the chis_mask to those chis, where the ground truth coordinates of
    # all defining four atoms are available.
    # Gather the chi angle atoms mask. Shape: [batch, num_temp, num_res, chis=4, atoms=4].
    chi_angle_atoms_mask = utils.batched_gather(
        params=all_atom_mask, indices=atom_indices, axis=0,
        batch_dims=3)
    # Check if all 4 chi angle atoms were set. Shape: [batch, num_temp, num_res, chis=4].
    chi_angle_atoms_mask = paddle.prod(chi_angle_atoms_mask, axis=[-1])
    chis_mask = chis_mask * chi_angle_atoms_mask

    # Stack all torsion angle atom positions.
    # Shape (B, T, N, torsions=7, atoms=4, xyz=3)
    torsions_atom_pos = paddle.concat(
        [pre_omega_atom_pos[:, :, :, None, :, :],
        phi_atom_pos[:, :, :, None, :, :],
        psi_atom_pos[:, :, :, None, :, :],
        chis_atom_pos
        ], axis=3)

    # Stack up masks for all torsion angles.
    # shape (B, T, N, torsions=7)
    torsion_angles_mask = paddle.concat(
        [pre_omega_mask[..., None],
        phi_mask[..., None],
        psi_mask[..., None],
        chis_mask
        ], axis=-1)

    # Create a frame from the first three atoms:
    # First atom: point on x-y-plane
    # Second atom: point on negative x-axis
    # Third atom: origin
    # r3.Rigids (B, T, N, torsions=7)
    torsion_frames = r3.rigids_from_3_points(
        p_neg_x_axis=torsions_atom_pos[..., 1, :],
        origin=torsions_atom_pos[..., 2, :],
        p_xy_plane=torsions_atom_pos[..., 0, :])

    # Compute the position of the fourth atom in this frame (the y and z
    # coordinates define the chi angle).
    # r3.Vecs (B, T, N, torsions=7)
    forth_atom_rel_pos = r3.rigids_mul_vecs(
        r3.invert_rigids(torsion_frames),
        r3.vecs_from_tensor(torsions_atom_pos[..., 3, :]))

    # Normalize to have the sin and cos of the torsion angle.
    # shape (B, T, N, torsions=7, sincos=2)
    torsion_angles_sin_cos = paddle.stack(
        [forth_atom_rel_pos.z, forth_atom_rel_pos.y], axis=-1)
    torsion_angles_sin_cos /= paddle.sqrt(
        paddle.sum(paddle.square(torsion_angles_sin_cos), axis=-1, keepdim=True)
        + 1e-8)

    # Mirror psi, because we computed it from the Oxygen-atom.
    torsion_angles_sin_cos *= paddle.to_tensor(
        [1., 1., -1., 1., 1., 1., 1.])[None, None, None, :, None]

    # Create alternative angles for ambiguous atom names.
    chi_is_ambiguous = utils.batched_gather(
        paddle.to_tensor(residue_constants.chi_pi_periodic), aatype)
    # chi_is_ambiguous (B, T, N, torsions=4)
    mirror_torsion_angles = paddle.concat(
        [paddle.ones([num_batch, num_temp, num_res, 3]),
        1.0 - 2.0 * chi_is_ambiguous], axis=-1)
    # mirror_torsion_angles (B, T, N, torsions=7)
    alt_torsion_angles_sin_cos = (
        torsion_angles_sin_cos * mirror_torsion_angles[:, :, :, :, None])

    if placeholder_for_undefined:
        # Add placeholder torsions in place of undefined torsion angles
        # (e.g. N-terminus pre-omega)
        placeholder_torsions = paddle.stack([
            paddle.ones(torsion_angles_sin_cos.shape[:-1]),
            paddle.zeros(torsion_angles_sin_cos.shape[:-1])
        ], axis=-1)
        torsion_angles_sin_cos = torsion_angles_sin_cos * torsion_angles_mask[
            ..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])
        alt_torsion_angles_sin_cos = alt_torsion_angles_sin_cos * torsion_angles_mask[
            ..., None] + placeholder_torsions * (1 - torsion_angles_mask[..., None])

    return {
        'torsion_angles_sin_cos': torsion_angles_sin_cos,  # (B, T, N, 7, 2)
        'alt_torsion_angles_sin_cos': alt_torsion_angles_sin_cos,  # (B, T, N, 7, 2)
        'torsion_angles_mask': torsion_angles_mask  # (B, T, N, 7)
    }
Example #15
def raw_reduce_prod(x, dim=[0], keep_dim=False):
    return paddle.prod(x, dim, keep_dim)