Example #1
    def forward(self, input, label):
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        logits = paddle.matmul(input, weight)
        if not self.training or label is None:
            return logits

        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin

        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])

        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
Example #2
    def forward(self, input, label=None):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        if not self.training or label is None:
            return cos
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m

        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
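The margin branch above is just the angle-addition identity cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m). A minimal numeric check of that identity (not part of the original module; the margin value below is arbitrary):

import math
import paddle

m = 0.5                                      # arbitrary margin in radians
cos = paddle.to_tensor([0.3, 0.7, -0.2])     # sample cosine similarities
sin = paddle.sqrt(1.0 - paddle.square(cos))
phi = cos * math.cos(m) - sin * math.sin(m)  # same formula as in forward()
ref = paddle.cos(paddle.acos(cos) + m)       # direct evaluation of cos(theta + m)
print(paddle.allclose(phi, ref))             # expected: True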
Example #3
    def sqrt_newton_schulz_autograd(self, A, numIters):
        A_shape = A.shape
        batchSize = A_shape[0]
        dim = A_shape[1]

        normA = A * A
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sqrt(normA)
        normA1 = normA.reshape([batchSize, 1, 1])
        Y = paddle.divide(A, paddle.expand_as(normA1, A))
        I = paddle.eye(dim, dim).reshape([1, dim, dim])
        l0 = []
        for i in range(batchSize):
            l0.append(I)
        I = paddle.concat(l0, axis=0)
        I.stop_gradient = False
        Z = paddle.eye(dim, dim).reshape([1, dim, dim])
        l1 = []
        for i in range(batchSize):
            l1.append(Z)
        Z = paddle.concat(l1, axis=0)
        Z.stop_gradient = False

        for i in range(numIters):
            T = 0.5 * (3.0 * I - Z.bmm(Y))
            Y = Y.bmm(T)
            Z = T.bmm(Z)
        sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1])
        sA = paddle.expand_as(sA, A)
        return sA
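A small sanity check for the iteration above (a sketch; net stands for whichever module defines the method, and the tolerance is only indicative): the result should satisfy sA @ sA close to A for a symmetric positive-definite batch.

import paddle

x = paddle.randn([4, 8, 8])
A = paddle.bmm(x, x.transpose([0, 2, 1])) + 0.1 * paddle.eye(8).unsqueeze(0)  # SPD batch
sA = net.sqrt_newton_schulz_autograd(A, numIters=20)
err = float(paddle.abs(paddle.bmm(sA, sA) - A).max())
print(err)  # should be close to zero for well-conditioned A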
Example #4
    def forward(self, x):
        x0 = self.linear0(x[0])
        x1 = self.linear1(x[1])
        bs = x1.shape[0]
        if self.dropout_input > 0:
            x0 = F.dropout(x0, p=self.dropout_input, training=self.training)
            x1 = F.dropout(x1, p=self.dropout_input, training=self.training)
        x0_chunks = paddle.split(x0, self.chunks, -1)
        x1_chunks = paddle.split(x1, self.chunks, -1)
        zs = []
        for x0_c, x1_c, m0, m1 in zip(x0_chunks, x1_chunks, self.merge_linears0,
                                      self.merge_linears1):
            m = m0(x0_c) * m1(x1_c)  # bs x split_size*rank
            m = m.reshape([bs, self.rank, -1])
            z = paddle.sum(m, 1)
            if self.pos_norm == 'before_cat':
                z = paddle.sqrt(F.relu(z)) - paddle.sqrt(F.relu(-z))
                z = F.normalize(z)
            zs.append(z)
        z = paddle.concat(zs, 1)
        if self.pos_norm == 'after_cat':
            z = paddle.sqrt(F.relu(z)) - paddle.sqrt(F.relu(-z))
            z = F.normalize(z)

        if self.dropout_pre_lin > 0:
            z = F.dropout(z, p=self.dropout_pre_lin, training=self.training)
        z = self.linear_out(z)
        if self.dropout_output > 0:
            z = F.dropout(z, p=self.dropout_output, training=self.training)
        return z
Example #5
    def forward(self, x):
        in_mean, in_var = paddle.mean(x, axis=[2, 3], keepdim=True), paddle.var(x, axis=[2, 3], keepdim=True)
        out_in = (x - in_mean) / paddle.sqrt(in_var + self.eps)
        ln_mean, ln_var = paddle.mean(x, axis=[1, 2, 3], keepdim=True), paddle.var(x, axis=[1, 2, 3], keepdim=True)
        out_ln = (x - ln_mean) / paddle.sqrt(ln_var + self.eps)
        out = self.rho.expand([x.shape[0], -1, -1, -1]) * out_in + \
              (1-self.rho.expand([x.shape[0], -1, -1, -1])) * out_ln
        out = out * self.gamma.expand([x.shape[0], -1, -1, -1]) + self.beta.expand([x.shape[0], -1, -1, -1])

        return out
Example #6
    def _margin_softmax(input, label, out_dim, param_attr, margin1, margin2,
                        margin3, scale, sample_ratio):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        if param_attr is None:
            param_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.XavierNormal(fan_in=0.0))
        weight = paddle.static.create_parameter(
            shape=[input.shape[1], out_dim],
            dtype='float32',
            name=unique_name.generate('final_fc_w'),
            attr=param_attr)

        if sample_ratio < 1.0:
            # partial fc sample process
            label, sampled_class_index = class_center_sample(
                label, out_dim, ratio=sample_ratio, ignore_label=-1)
            sampled_class_index.stop_gradient = True
            weight = paddle.gather(weight, sampled_class_index, axis=1)
            out_dim = paddle.shape(sampled_class_index)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(weight), axis=0, keepdim=True))
        weight = paddle.divide(weight, weight_norm)
        cos = paddle.matmul(input, weight)

        theta = paddle.acos(cos)
        if margin1 != 1.0:
            theta = margin1 * theta
        if margin2 != 0.0:
            theta = theta + margin2
        margin_cos = paddle.cos(theta)
        if margin3 != 0.0:
            margin_cos = margin_cos - margin3

        one_hot = paddle.nn.functional.one_hot(label, num_classes=out_dim)
        diff = paddle.multiply(paddle.subtract(margin_cos, cos), one_hot)
        target_cos = paddle.add(cos, diff)
        logit = paddle.scale(target_cos, scale=scale)

        loss, prob = paddle.nn.functional.softmax_with_cross_entropy(
            logits=logit,
            label=paddle.reshape(label, (-1, 1)),
            return_softmax=True)
        avg_loss = paddle.mean(x=loss)

        one_hot.stop_gradient = True

        return avg_loss, prob
Example #7
    def __init__(self, height=64, width=64, with_r=False, with_boundary=False):
        super(AddCoordsTh, self).__init__()
        self.with_r = with_r
        self.with_boundary = with_boundary

        with paddle.no_grad():
            x_coords = paddle.arange(height).unsqueeze(1).expand(
                (height, width)).astype('float32')
            y_coords = paddle.arange(width).unsqueeze(0).expand(
                (height, width)).astype('float32')
            x_coords = (x_coords / (height - 1)) * 2 - 1
            y_coords = (y_coords / (width - 1)) * 2 - 1
            coords = paddle.stack([x_coords, y_coords],
                                  axis=0)  # (2, height, width)

            if self.with_r:
                rr = paddle.sqrt(
                    paddle.pow(x_coords, 2) +
                    paddle.pow(y_coords, 2))  # (height, width)
                rr = (rr / paddle.max(rr)).unsqueeze(0)
                coords = paddle.concat([coords, rr], axis=0)

            self.coords = coords.unsqueeze(0)  # (1, 2 or 3, height, width)
            self.x_coords = x_coords
            self.y_coords = y_coords
Example #8
    def forward(self):
        fpn_rois = self.input('FpnRois', 0)
        areas = self.bbox_area(fpn_rois)
        scale = paddle.sqrt(areas)
        num_level = self.max_level - self.min_level + 1
        target_level = paddle.log(scale / self.refer_scale + 1e-06) / np.log(2)
        target_level = paddle.floor(self.refer_level + target_level)
        target_level = paddle.clip(target_level,
                                   min=self.min_level,
                                   max=self.max_level)

        rois = list()
        rois_idx_order = list()

        for level in range(self.min_level, self.max_level + 1):
            level_tensor = paddle.full_like(target_level, fill_value=level)
            res = paddle.equal(target_level, level_tensor)
            res = paddle.squeeze(res, axis=1)
            res = paddle.cast(res, dtype='int32')
            index = paddle.nonzero(res)
            roi = paddle.gather(fpn_rois, index, axis=0)
            rois.append(roi)
            rois_idx_order.append(index)
        rois_idx_order = paddle.concat(rois_idx_order, axis=0)
        size = paddle.shape(rois_idx_order)[0]
        _, rois_idx_restore = paddle.topk(rois_idx_order,
                                          axis=0,
                                          sorted=True,
                                          largest=False,
                                          k=size)
        #rois_idx_restore = paddle.cast(rois_idx_restore, dtype='int32')
        return {'MultiFpnRois': rois, 'RestoreIndex': [rois_idx_restore]}
Example #9
def supervised_chi_loss(ret, batch, value, config):
    """Computes loss for direct chi angle supervision.

    Jumper et al. (2021) Suppl. Alg. 27 "torsionAngleLoss"

    Args:
        ret: Dictionary to write outputs into, needs to contain 'loss'.
        batch: Batch, needs to contain 'seq_mask', 'chi_mask', 'chi_angles'.
        value: Dictionary containing structure module output, needs to contain
            value['sidechains']['angles_sin_cos'] for angles and
            value['sidechains']['unnormalized_angles_sin_cos'] for unnormalized
            angles.
        config: Configuration of loss, should contain 'chi_weight' and
            'angle_norm_weight', 'angle_norm_weight' scales angle norm term,
            'chi_weight' scales torsion term.
    """
    eps = 1e-6
    
    sequence_mask = batch['seq_mask']
    num_res = sequence_mask.shape[1]
    batch_size = sequence_mask.shape[0]
    chi_mask = batch['chi_mask']
    pred_angles = paddle.reshape(value['sidechains']['angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    pred_angles = pred_angles[:, :, :, 3:]

    residue_type_one_hot = paddle.nn.functional.one_hot(batch['aatype_index'], 
                            num_classes=residue_constants.restype_num + 1)
    chi_pi_periodic = paddle.einsum('nijk, nkl->nijl', residue_type_one_hot[:, None, ...], 
                            paddle.to_tensor(residue_constants.chi_pi_periodic)[None])

    sin_cos_true_chi = batch['chi_angles_sin_cos'][:, None, ...]

    # This is -1 if chi is pi-periodic and +1 if it's 2pi-periodic
    shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
    sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi

    sq_chi_error = paddle.sum(squared_difference(sin_cos_true_chi, pred_angles), axis=-1)
    sq_chi_error_shifted = paddle.sum(squared_difference(sin_cos_true_chi_shifted, pred_angles), axis=-1)
    sq_chi_error = paddle.minimum(sq_chi_error, sq_chi_error_shifted)

    sq_chi_loss_tmp = []
    for i in range(batch_size):
        sq_chi_loss_i = utils.mask_mean(mask=paddle.unsqueeze(chi_mask[i], axis=0), value=sq_chi_error[i])
        sq_chi_loss_tmp.append(sq_chi_loss_i)
    sq_chi_loss = paddle.to_tensor(sq_chi_loss_tmp, stop_gradient=False)
    sq_chi_loss = paddle.squeeze(sq_chi_loss, axis=-1)
    ret['chi_loss'] = sq_chi_loss
    ret['loss'] += config.chi_weight * sq_chi_loss

    unnormed_angles = paddle.reshape(value['sidechains']['unnormalized_angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    angle_norm = paddle.sqrt(paddle.sum(paddle.square(unnormed_angles), axis=-1) + eps)
    norm_error = paddle.abs(angle_norm - 1.)
    angle_norm_loss_tmp = []
    for i in range(batch_size):
        angle_norm_loss_i = utils.mask_mean(mask=paddle.unsqueeze(sequence_mask[i], axis=[0,2]), value=norm_error[i])
        angle_norm_loss_tmp.append(angle_norm_loss_i)
    angle_norm_loss = paddle.to_tensor(angle_norm_loss_tmp, stop_gradient=False)
    angle_norm_loss = paddle.squeeze(angle_norm_loss, axis=-1)
    ret['angle_norm_loss'] = angle_norm_loss
    ret['loss'] += config.angle_norm_weight * angle_norm_loss
Example #10
    def forward(ctx, target: paddle.Tensor,
                source: paddle.Tensor) -> Tuple[paddle.Tensor, paddle.Tensor]:
        """Find the top-3 nearest neighbors of the target set from the source
        set.

        Args:
            target (Tensor): shape (B, N, 3), points set that needs to
                find the nearest neighbors.
            source (Tensor): shape (B, M, 3), points set that is used
                to find the nearest neighbors of points in target set.

        Returns:
            Tuple[Tensor, Tensor]: dist of shape (B, N, 3), the L2 distance
                from each target point to its three nearest source points, and
                idx of shape (B, N, 3), the indices of those neighbors in the
                source set.
        """

        B, N, _ = target.shape
        m = source.shape[1]
        dist2 = paddle.zeros((B, N, 3), dtype=paddle.float32)
        idx = paddle.zeros((B, N, 3), dtype=paddle.int64)

        interpolate_ops.three_nn_wrapper(B, N, m, target, source, dist2, idx)

        idx.stop_gradient = True

        return paddle.sqrt(dist2), idx
Example #11
def _weight_norm(v, g, dim):
    shape = v.shape
    ndims = len(shape)

    if dim == -1:
        v_normalized = v / (paddle.sqrt(paddle.sum(paddle.square(v))) + 1e-12)
    elif dim == 0:
        p_matrix = paddle.reshape(v, (shape[0], -1))
        v_normalized = F.l2_normalize(p_matrix, axis=1)
        v_normalized = paddle.reshape(v_normalized, shape)
    elif dim == ndims - 1:
        p_matrix = paddle.reshape(v, (-1, shape[-1]))
        v_normalized = F.l2_normalize(p_matrix, axis=0)
        v_normalized = paddle.reshape(v_normalized, shape)
    else:
        perm = list(range(ndims))
        perm[0] = dim
        perm[dim] = 0
        p_transposed = paddle.transpose(v, perm)
        transposed_shape = p_transposed.shape
        p_matrix = paddle.reshape(p_transposed, (p_transposed.shape[0], -1))
        v_normalized = F.l2_normalize(p_matrix, axis=1)
        v_normalized = paddle.reshape(v_normalized, transposed_shape)
        v_normalized = paddle.transpose(v_normalized, perm)
    weight = F.elementwise_mul(v_normalized,
                               g,
                               axis=dim if dim is not None else -1)
    return weight
Example #12
def bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box encode for VoxelNet
    Args:
        boxes ([N, 5] Tensor): BEV boxes: x, y, w, l, r
        anchors ([N, 5] Tensor): anchors: x, y, w, l, r
    """
    xa, ya, wa, la, ra = paddle.split(anchors, 5, axis=-1)
    xg, yg, wg, lg, rg = paddle.split(boxes, 5, axis=-1)
    diagonal = paddle.sqrt(la**2 + wa**2)
    xt = (xg - xa) / diagonal
    yt = (yg - ya) / diagonal
    if smooth_dim:
        lt = lg / la - 1
        wt = wg / wa - 1
    else:
        lt = paddle.log(lg / la)
        wt = paddle.log(wg / wa)
    if encode_angle_to_vector:
        rgx = paddle.cos(rg)
        rgy = paddle.sin(rg)
        rax = paddle.cos(ra)
        ray = paddle.sin(ra)
        rtx = rgx - rax
        rty = rgy - ray
        return paddle.concat([xt, yt, wt, lt, rtx, rty], axis=-1)
    else:
        rt = rg - ra
        return paddle.concat([xt, yt, wt, lt, rt], axis=-1)
Example #13
 def forward(self, x):
     """LayerNorm."""
     v = x.mean(-1, keepdim=True)
     s = (x - v).pow(2).mean(-1, keepdim=True)
     x = (x - v) / paddle.sqrt(s + self.variance_epsilon)
     res = self.gamma * x + self.beta
     return res
Example #14
    def test_fourth_order(self):
        enable_prim()
        main = paddle.static.Program()
        startup = paddle.static.Program()
        with paddle.static.program_guard(main, startup):
            x = paddle.static.data(name='x', shape=[1], dtype='float32')
            x2 = paddle.multiply(x, x)
            x3 = paddle.multiply(x2, x)
            x4 = paddle.multiply(x3, x)
            x5 = paddle.multiply(x4, x)
            out = paddle.sqrt(x5 + x4)

            grad1, = paddle.static.gradients([out], [x])
            grad2, = paddle.static.gradients([grad1], [x])
            grad3, = paddle.static.gradients([grad2], [x])
            grad4, = paddle.static.gradients([grad3], [x])

            prim2orig(main.block(0))

        feed = {
            x.name: np.array([2.]).astype('float32'),
        }
        fetch_list = [grad4.name]
        # (3*(-5*x^2-16*x-16))/(16*(x+1)^3.5)
        result = [np.array([-0.27263762711])]

        place = paddle.CPUPlace()
        if paddle.device.is_compiled_with_cuda():
            place = paddle.CUDAPlace(0)
        exe = paddle.static.Executor(place)
        exe.run(startup)
        outs = exe.run(main, feed=feed, fetch_list=fetch_list)
        self.assertTrue(np.allclose(outs, result))
        disable_prim()
Example #15
 def cdist(self, a, b):
     a_s = paddle.norm(a, p=2, axis=-1).pow(2)
     b_s = paddle.norm(b, p=2, axis=-1).pow(2)
     dist_score = -2 * paddle.bmm(a, b.transpose(
         [0, 2, 1])) + a_s.unsqueeze(-1) + b_s.unsqueeze(1)
     dist_score = paddle.sqrt(paddle.clip(dist_score, min=1e-30))
     return dist_score
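A quick cross-check for the pairwise distances above (a sketch; module stands for whatever object defines cdist): compare against an explicit broadcasted computation on a tiny input.

import paddle

a = paddle.randn([1, 3, 4])
b = paddle.randn([1, 5, 4])
fast = module.cdist(a, b)
slow = paddle.linalg.norm(a.unsqueeze(2) - b.unsqueeze(1), p=2, axis=-1)
print(paddle.allclose(fast, slow, atol=1e-4))  # expected: True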
Example #16
    def forward(self, x):

        x = x.unsqueeze(1)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.bn1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = x.reshape((x.shape[0], -1, x.shape[-1]))
        w = self.attention(x)
        if self.encoder_type == "SAP":
            x = paddle.sum(x * w, axis=2)
        elif self.encoder_type == "ASP":
            mu = paddle.sum(x * w, axis=2)
            sg = paddle.sum((x**2) * w, axis=2) - mu**2
            sg = paddle.clip(sg, min=1e-5)
            sg = paddle.sqrt(sg)
            x = paddle.concat((mu, sg), 1)

        x = x.reshape((x.shape[0], -1))
        x = self.fc(x)

        return x
Example #17
 def forward(self, graph, feature):
     """graph norm"""
     nodes = paddle.ones(shape=[graph.num_nodes, 1], dtype="float32")
     norm = self.graph_pool(graph, nodes)
     norm = paddle.sqrt(norm)
     norm = paddle.gather(norm, graph.graph_node_id)
     return feature / norm
Example #18
def calc_square_dist(point_feat_a, point_feat_b, norm=True):
    """Calculating square distance between a and b.

    Args:
        point_feat_a (Tensor): (B, N, C) Feature vector of each point.
        point_feat_b (Tensor): (B, M, C) Feature vector of each point.
        norm (Bool): Whether to normalize the distance.
            Default: True.

    Returns:
        Tensor: (B, N, M) Distance between each pair points.
    """
    length_a = point_feat_a.shape[1]
    length_b = point_feat_b.shape[1]
    num_channel = point_feat_a.shape[-1]
    # [bs, n, 1]
    a_square = paddle.sum(point_feat_a.unsqueeze(2).pow(2), axis=-1)
    # [bs, 1, m]
    b_square = paddle.sum(point_feat_b.unsqueeze(1).pow(2), axis=-1)
    a_square = paddle.tile(a_square, (1, 1, length_b))  # [bs, n, m]
    b_square = paddle.tile(b_square, (1, length_a, 1))  # [bs, n, m]

    coor = paddle.matmul(point_feat_a, point_feat_b.transpose((0, 2, 1)))

    dist = a_square + b_square - 2 * coor
    if norm:
        dist = paddle.sqrt(dist) / num_channel
    return dist
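A quick check of the unnormalized branch (a sketch): with norm=False the function should return the plain squared Euclidean distance between the two point sets.

import paddle

a = paddle.randn([2, 5, 3])
b = paddle.randn([2, 7, 3])
d = calc_square_dist(a, b, norm=False)
ref = paddle.sum((a.unsqueeze(2) - b.unsqueeze(1)) ** 2, axis=-1)
print(paddle.allclose(d, ref, atol=1e-4))  # expected: True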
Example #19
    def gen_base_anchors(self):
        w = self.base_size
        h = self.base_size
        if self.ctr is None:
            x_ctr = 0.5 * (w - 1)
            y_ctr = 0.5 * (h - 1)
        else:
            x_ctr, y_ctr = self.ctr

        h_ratios = paddle.sqrt(self.ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:] * self.scales[:]).reshape([-1])
            hs = (h * h_ratios[:] * self.scales[:]).reshape([-1])
        else:
            ws = (w * self.scales[:] * w_ratios[:]).reshape([-1])
            hs = (h * self.scales[:] * h_ratios[:]).reshape([-1])

        base_anchors = paddle.stack(
            [
                x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1),
                x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)
            ],
            axis=-1)
        base_anchors = paddle.round(base_anchors)
        return base_anchors
Example #20
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box decode for VoxelNet in lidar
    Args:
        box_encodings ([N, 5] Tensor): encoded BEV boxes: xt, yt, wt, lt, rt
        anchors ([N, 5] Tensor): anchors: x, y, w, l, r
    """
    xa, ya, wa, la, ra = paddle.split(anchors, 5, axis=-1)
    if encode_angle_to_vector:
        xt, yt, wt, lt, rtx, rty = paddle.split(
            box_encodings, 6, axis=-1)

    else:
        xt, yt, wt, lt, rt = paddle.split(box_encodings, 5, axis=-1)

    # xt, yt, zt, wt, lt, ht, rt = paddle.split(box_encodings, 1, axis=-1)
    diagonal = paddle.sqrt(la**2 + wa**2)
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
    else:
        lg = paddle.exp(lt) * la
        wg = paddle.exp(wt) * wa
    if encode_angle_to_vector:
        rax = paddle.cos(ra)
        ray = paddle.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        rg = atan2(rgy, rgx)
    else:
        rg = rt + ra
    return paddle.concat([xg, yg, wg, lg, rg], axis=-1)
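A round-trip sketch combining this decoder with bev_box_encode from Example #12 (both assumed importable from the same module): decoding an encoding should recover the original BEV boxes.

import paddle

anchors = paddle.to_tensor([[0.0, 0.0, 1.6, 3.9, 0.0]])  # x, y, w, l, r
boxes = paddle.to_tensor([[0.5, -0.3, 1.8, 4.2, 0.1]])
enc = bev_box_encode(boxes, anchors)
dec = bev_box_decode(enc, anchors)
print(paddle.allclose(dec, boxes, atol=1e-5))  # expected: True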
Example #21
def magphase(x: Tensor) -> Tuple[Tensor, Tensor]:
    """Compute compext norm of a given tensor.
    Typically,the input tensor is the result of a complex Fourier transform.
    Parameters:
        x(Tensor): The input tensor of shape (..., 2).
    Returns:
        The tuple of magnitude and phase.

    Shape:
        x: the shape of x is arbitrary, with the shape of last axis being 2
        outputs: the shapes of magnitude and phase are both input.shape[:-1]

    Examples:

        .. code-block:: python

            import paddle
            import paddleaudio.functional as F
            x = paddle.randn((10, 10, 2))
            mag, phase = F.magphase(x)

    """
    if x.shape[-1] != 2:
        raise ParameterError(
            f'complex tensor must be of shape (..., 2), but received {x.shape} instead'
        )
    mag = paddle.sqrt(paddle.square(x).sum(axis=-1))
    x0 = x.reshape((-1, 2))
    phase = paddle.atan2(x0[:, 0], x0[:, 1])
    phase = phase.reshape(x.shape[:-1])

    return mag, phase
Example #22
 def squash(self, Z):
     """squash
     """
     vec_squared_norm = paddle.sum(paddle.square(Z), axis=-1, keepdim=True)
     scalar_factor = vec_squared_norm / \
         (1 + vec_squared_norm) / paddle.sqrt(vec_squared_norm + 1e-8)
     vec_squashed = scalar_factor * Z
     return vec_squashed
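A minimal property check (a sketch; caps stands for whichever layer defines squash above): the output keeps the direction of Z, and its norm is ||Z||^2 / (1 + ||Z||^2), so it always stays below 1.

import paddle

Z = paddle.randn([2, 3, 8])
out = caps.squash(Z)
norm_z = paddle.linalg.norm(Z, p=2, axis=-1)
norm_out = paddle.linalg.norm(out, p=2, axis=-1)
print(paddle.allclose(norm_out, norm_z ** 2 / (1 + norm_z ** 2), atol=1e-4))  # expected: True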
Example #23
    def __call__(self, pred, target, **kwargs):
        """Forward Function.

        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
        """
        return paddle.sum(paddle.sqrt((pred - target)**2 + self.eps))
Example #24
    def forward(self, input):
        in_mean, in_var = paddle.mean(input, [2, 3],
                                      keepdim=True), paddle.var(input, [2, 3],
                                                                keepdim=True)
        out_in = (input - in_mean) / paddle.sqrt(in_var + self.eps)
        ln_mean, ln_var = paddle.mean(input, [1, 2, 3],
                                      keepdim=True), paddle.var(input,
                                                                [1, 2, 3],
                                                                keepdim=True)
        out_ln = (input - ln_mean) / paddle.sqrt(ln_var + self.eps)
        out = self.rho.expand([input.shape[0], -1, -1, -1]) * out_in + (
            1 - self.rho.expand([input.shape[0], -1, -1, -1])) * out_ln
        out = out * self.gamma.expand([input.shape[0], -1, -1, -1
                                       ]) + self.beta.expand(
                                           [input.shape[0], -1, -1, -1])

        return out
Example #25
def rigids_from_3_points(p_neg_x_axis: paddle.Tensor,
                         origin: paddle.Tensor,
                         p_xy_plane: paddle.Tensor,
                         eps: float = 1e-8) -> Rigids:
    """Create Rigids from 3 points.

    Jumper et al. (2021) Suppl. Alg. 21 "rigidFrom3Points"
    This creates a set of rigid transformations from 3 points by Gram Schmidt
    orthogonalization.

    Args:
        p_neg_x_axis: [*, 3] coordinates
        origin: [*, 3] coordinates
        p_xy_plane: [*, 3] coordinates
        eps: small regularizer added to squared norm before taking square root.
    Returns:
        Rigids corresponding to transformations from global frame
        to local frames derived from the input points.
    """
    p_neg_x_axis = paddle.unbind(p_neg_x_axis, axis=-1)
    origin = paddle.unbind(origin, axis=-1)
    p_xy_plane = paddle.unbind(p_xy_plane, axis=-1)

    e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]
    e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]

    norms = paddle.sqrt(
        paddle.square(e0[0]) + paddle.square(e0[1]) + paddle.square(e0[2]) +
        eps)
    e0 = [c / norms for c in e0]
    dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))
    e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]
    norms = paddle.sqrt(
        paddle.square(e1[0]) + paddle.square(e1[1]) + paddle.square(e1[2]) +
        eps)
    e1 = [c / norms for c in e1]
    e2 = [
        e0[1] * e1[2] - e0[2] * e1[1],
        e0[2] * e1[0] - e0[0] * e1[2],
        e0[0] * e1[1] - e0[1] * e1[0],
    ]

    rots = paddle.stack([c for tup in zip(e0, e1, e2) for c in tup], axis=-1)

    return Rigids(Rots(rots), Vecs(origin))
Example #26
    def _test(self, run_npu=True):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.random(size=(4, 32)).astype('float32')
        b_np = np.random.random(size=(4, 32)).astype('float32')
        label_np = np.random.randint(2, size=(4, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[4, 32], dtype='float32')
            b = paddle.static.data(name="b", shape=[4, 32], dtype='float32')
            label = paddle.static.data(name="label",
                                       shape=[4, 1],
                                       dtype='int64')

            c = paddle.multiply(a, b)
            d = paddle.sqrt(c)

            # 4 x 128
            fc_1 = fluid.layers.fc(input=d, size=128)
            # 4 x 2
            prediction = fluid.layers.fc(input=fc_1, size=2)

            # 4 x 2
            prob = fluid.layers.softmax(prediction, axis=1)

            cost = fluid.layers.cross_entropy(input=prob, label=label)
            loss = fluid.layers.mean(cost)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)

        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        print("Start run on {}".format(place))
        for epoch in range(100):

            pred_res, loss_res = exe.run(main_prog,
                                         feed={
                                             "a": a_np,
                                             "b": b_np,
                                             "label": label_np
                                         },
                                         fetch_list=[prediction, loss])
            if epoch % 10 == 0:
                print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
                    epoch, pred_res[0], loss_res))

        return pred_res, loss_res
Example #27
 def forward(self, x: Tensor) -> Tensor:
     fft_signal = self._stft(x)
     spectrogram = paddle.square(fft_signal).sum(-1)
     if self.power == 2.0:
         pass
     elif self.power == 1.0:
         spectrogram = paddle.sqrt(spectrogram)
     else:
         spectrogram = spectrogram**(self.power / 2.0)
     return spectrogram
Example #28
def calc_dist_matrix(x, y):
    """Calculate Euclidean distance matrix with paddle.Tensor"""
    n = x.shape[0]
    m = y.shape[0]
    d = x.shape[1]
    x = x.unsqueeze(1)
    x = paddle.expand(x, [n, m, d])
    y = y.unsqueeze(0)
    y = paddle.expand(y, [n, m, d])
    dist_matrix = paddle.sqrt(paddle.pow(x - y, 2).sum(2))
    return dist_matrix
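A tiny consistency check (a sketch): entry (i, j) of the matrix is simply the Euclidean distance between x[i] and y[j].

import paddle

x = paddle.randn([4, 6])
y = paddle.randn([3, 6])
dm = calc_dist_matrix(x, y)
print(paddle.allclose(dm[1, 2], paddle.linalg.norm(x[1] - y[2], p=2)))  # expected: True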
Example #29
def vecs_robust_norm(v: Vecs, epsilon: float = 1e-8) -> paddle.Tensor:
    """Computes norm of vectors 'v'.

    Args:
        v: vectors whose norm is computed.
        epsilon: small regularizer added to squared norm before taking square root.
    Returns:
        norm of 'v'
    """
    return paddle.sqrt(
        paddle.square(v.x) + paddle.square(v.y) + paddle.square(v.z) + epsilon)
Example #30
def clip_grad_norm_(grads_fp32,
                    grads_fp16,
                    grad_norm_clip=2.0,
                    grad_norm_clip_max=2.0):

    if len(grads_fp32) <= 0 and len(grads_fp16) <= 0:
        print('grads_fp32 and grads_fp16 are empty')
        return None

    if len(grads_fp32) > 0:
        norm_fp32 = paddle.sum(
            paddle.stack([
                paddle.matmul(g.detach().reshape((1, -1)),
                              g.detach().reshape((-1, 1))) for g in grads_fp32
            ]))
    if len(grads_fp16) > 0:
        norm_fp16 = paddle.sum(
            paddle.stack([
                paddle.matmul(g.detach().reshape((1, -1)),
                              g.detach().reshape((-1, 1))) for g in grads_fp16
            ]))

    if len(grads_fp32) > 0 and len(grads_fp16) > 0:
        global_norm = paddle.sqrt(norm_fp32 + paddle.cast(norm_fp16,
                                                          'float32'))
    elif len(grads_fp32) > 0:
        global_norm = paddle.sqrt(norm_fp32)
    elif len(grads_fp16) > 0:
        global_norm = paddle.sqrt(paddle.cast(norm_fp16, 'float32'))

    clip_coef_fp32 = paddle.clip(
        grad_norm_clip / (global_norm + 1e-6), max=grad_norm_clip_max)

    if len(grads_fp32) > 0:
        grads_fp32 = [g.scale_(clip_coef_fp32) for g in grads_fp32]

    if len(grads_fp16) > 0:
        clip_coef_fp16 = paddle.cast(clip_coef_fp32, 'float16')
        grads_fp16 = [g.scale_(clip_coef_fp16) for g in grads_fp16]

    return global_norm
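A dygraph usage sketch (everything below is illustrative and not from the original source): clip all float32 gradients of a tiny model to norm 1.0 after backward, then let the optimizer consume the scaled gradients.

import paddle

model = paddle.nn.Linear(4, 2)
opt = paddle.optimizer.SGD(learning_rate=0.1, parameters=model.parameters())
loss = model(paddle.randn([8, 4])).mean()
loss.backward()
grads = [p.grad for p in model.parameters() if p.grad is not None]
clip_grad_norm_(grads, grads_fp16=[], grad_norm_clip=1.0, grad_norm_clip_max=1.0)
opt.step()
opt.clear_grad()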