Example #1
    def forward(self, input, label=None):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        if not self.training or label is None:
            return cos
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m

        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
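The phi computation above is the angle-addition identity cos(θ + m) = cos θ · cos m − sin θ · sin m applied elementwise, which shifts the target-class angle by the margin without ever computing θ explicitly. A minimal standalone sketch (plain Paddle, no class state assumed) checking that identity numerically:

import math
import paddle

margin = 0.5
theta = paddle.uniform([4], min=0.1, max=3.0)      # arbitrary angles in radians
cos, sin = paddle.cos(theta), paddle.sin(theta)

# cos(theta + margin) via the angle-addition identity, as in the forward above
phi = cos * math.cos(margin) - sin * math.sin(margin)

print(paddle.allclose(phi, paddle.cos(theta + margin)))  # True (up to float error)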
Example #2
    def forward(self, sparse_inputs, dense_inputs):
        # -------------------- first order term  --------------------
        sparse_inputs_concat = paddle.concat(sparse_inputs, axis=1)
        sparse_emb_one = self.embedding_one(sparse_inputs_concat)

        dense_emb_one = paddle.multiply(dense_inputs, self.dense_w_one)
        dense_emb_one = paddle.unsqueeze(dense_emb_one, axis=2)

        y_first_order = paddle.sum(sparse_emb_one, 1) + paddle.sum(
            dense_emb_one, 1)

        # -------------------- second order term  --------------------
        sparse_embeddings = self.embedding(sparse_inputs_concat)
        dense_inputs_re = paddle.unsqueeze(dense_inputs, axis=2)
        dense_embeddings = paddle.multiply(dense_inputs_re, self.dense_w)
        feat_embeddings = paddle.concat([sparse_embeddings, dense_embeddings],
                                        1)
        # sum_square part
        summed_features_emb = paddle.sum(feat_embeddings,
                                         1)  # None * embedding_size
        summed_features_emb_square = paddle.square(
            summed_features_emb)  # None * embedding_size

        # square_sum part
        squared_features_emb = paddle.square(
            feat_embeddings)  # None * num_field * embedding_size
        squared_sum_features_emb = paddle.sum(squared_features_emb,
                                              1)  # None * embedding_size

        y_second_order = 0.5 * paddle.sum(
            summed_features_emb_square - squared_sum_features_emb,
            1,
            keepdim=True)  # None * 1

        return y_first_order, y_second_order, feat_embeddings
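The sum_square / square_sum combination relies on the identity 0.5 · ((Σᵢ vᵢ)² − Σᵢ vᵢ²) = Σ_{i<j} vᵢ · vⱼ, which yields all pairwise field interactions in linear time. A short sketch with hypothetical shapes (not tied to this model) verifying the identity:

import paddle

feat = paddle.randn([2, 4, 3])   # hypothetical: batch 2, 4 fields, embedding size 3

# linear-time form used in the forward above
fast = 0.5 * paddle.sum(
    paddle.square(paddle.sum(feat, 1)) - paddle.sum(paddle.square(feat), 1),
    1,
    keepdim=True)

# brute-force sum over all field pairs i < j
slow = paddle.zeros([2, 1])
for i in range(feat.shape[1]):
    for j in range(i + 1, feat.shape[1]):
        slow += paddle.sum(feat[:, i] * feat[:, j], 1, keepdim=True)

print(paddle.allclose(fast, slow, atol=1e-5))  # True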
Example #3
    def forward(self, input, label):
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        logits = paddle.matmul(input, weight)
        if not self.training or label is None:
            return logits

        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin

        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])

        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
Example #4
    def forward(self, feat_idx, feat_value):
        # -------------------- first order term  --------------------
        first_weights_re = self.embedding_w(feat_idx)
        first_weights = paddle.reshape(first_weights_re,
                                       shape=[-1, self.args.num_field,
                                              1])  # None * num_field * 1
        y_first_order = paddle.sum(first_weights * feat_value, 1)

        # -------------------- second order term  --------------------
        feat_embeddings_re = self.embedding(feat_idx)
        feat_embeddings = paddle.reshape(
            feat_embeddings_re,
            shape=[-1, self.args.num_field, self.args.embedding_size
                   ])  # None * num_field * embedding_size
        feat_embeddings = feat_embeddings * feat_value  # None * num_field * embedding_size

        # sum_square part
        summed_features_emb = paddle.sum(feat_embeddings,
                                         1)  # None * embedding_size
        summed_features_emb_square = paddle.square(
            summed_features_emb)  # None * embedding_size

        # square_sum part
        squared_features_emb = paddle.square(
            feat_embeddings)  # None * num_field * embedding_size
        squared_sum_features_emb = paddle.sum(squared_features_emb,
                                              1)  # None * embedding_size

        y_second_order = 0.5 * paddle.sum(
            summed_features_emb_square - squared_sum_features_emb,
            1,
            keepdim=True)  # None * 1

        return y_first_order, y_second_order, feat_embeddings
Example #5
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            data_a = fluid.data(name="data_a", shape=[128, 1], dtype="float32")
            data_b = fluid.data(name="data_b", shape=[256, 1], dtype="float32")

            fc_a = fluid.layers.fc(data_a, size=256)
            fc_b = fluid.layers.fc(data_b, size=64)

            data_a_square = paddle.square(fc_a)
            data_b_square = paddle.square(fc_b)

            matmul_ab = paddle.matmul(fc_a, fc_b)
            matmul_ab_square = paddle.square(matmul_ab)
            matmul_square_ab = paddle.matmul(data_a_square, data_b_square)

            scale = paddle.fluid.layers.fill_constant(shape=[1],
                                                      value=0.5,
                                                      dtype='float32')

            sub_val = paddle.fluid.layers.elementwise_sub(
                matmul_ab_square, matmul_square_ab)
            squared_mat_sub_out = fluid.layers.elementwise_mul(sub_val, scale)

        self.feeds = {
            "data_a": np.random.random((128, 1)).astype("float32"),
            "data_b": np.random.random((256, 1)).astype("float32")
        }
        self.fetch_list = [squared_mat_sub_out]
Example #6
def vecs_robust_norm(v: Vecs, epsilon: float = 1e-8) -> paddle.Tensor:
    """Computes norm of vectors 'v'.

    Args:
        v: vectors to be normalized.
        epsilon: small regularizer added to squared norm before taking square root.
    Returns:
        norm of 'v'
    """
    return paddle.sqrt(
        paddle.square(v.x) + paddle.square(v.y) + paddle.square(v.z) + epsilon)
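A minimal usage sketch, assuming Vecs is a simple container exposing x, y and z tensors (stubbed here as a namedtuple purely for illustration):

import collections
import paddle

Vecs = collections.namedtuple('Vecs', ['x', 'y', 'z'])  # hypothetical stand-in

v = Vecs(x=paddle.to_tensor([3.0]),
         y=paddle.to_tensor([4.0]),
         z=paddle.to_tensor([0.0]))
print(vecs_robust_norm(v))  # ~5.0 (plus the tiny epsilon under the sqrt)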
Example #7
    def _margin_softmax(input, label, out_dim, param_attr, margin1, margin2,
                        margin3, scale, sample_ratio):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        if param_attr is None:
            param_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.XavierNormal(fan_in=0.0))
        weight = paddle.static.create_parameter(
            shape=[input.shape[1], out_dim],
            dtype='float32',
            name=unique_name.generate('final_fc_w'),
            attr=param_attr)

        if sample_ratio < 1.0:
            # partial fc sample process
            label, sampled_class_index = class_center_sample(
                label, out_dim, ratio=sample_ratio, ignore_label=-1)
            sampled_class_index.stop_gradient = True
            weight = paddle.gather(weight, sampled_class_index, axis=1)
            out_dim = paddle.shape(sampled_class_index)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(weight), axis=0, keepdim=True))
        weight = paddle.divide(weight, weight_norm)
        cos = paddle.matmul(input, weight)

        theta = paddle.acos(cos)
        if margin1 != 1.0:
            theta = margin1 * theta
        if margin2 != 0.0:
            theta = theta + margin2
        margin_cos = paddle.cos(theta)
        if margin3 != 0.0:
            margin_cos = margin_cos - margin3

        one_hot = paddle.nn.functional.one_hot(label, num_classes=out_dim)
        diff = paddle.multiply(paddle.subtract(margin_cos, cos), one_hot)
        target_cos = paddle.add(cos, diff)
        logit = paddle.scale(target_cos, scale=scale)

        loss, prob = paddle.nn.functional.softmax_with_cross_entropy(
            logits=logit,
            label=paddle.reshape(label, (-1, 1)),
            return_softmax=True)
        avg_loss = paddle.mean(x=loss)

        one_hot.stop_gradient = True

        return avg_loss, prob
Example #8
 def forward(self, logit, label=None):
     N, fea_dim = logit.shape[:2]
     logit_norm = paddle.sqrt(paddle.sum(paddle.square(logit), axis=1)).reshape((N, 1, -1))
     logit = paddle.divide(logit, logit_norm)
     output = paddle.reshape(logit, shape=[-1, 3, fea_dim])
     anchor, positive, negative = paddle.split(output, num_or_sections=3, axis=1)
     anchor = paddle.reshape(anchor, shape=[-1, fea_dim])
     positive = paddle.reshape(positive, shape=[-1, fea_dim])
     negative = paddle.reshape(negative, shape=[-1, fea_dim])
     a_p = paddle.square(anchor - positive)
     a_n = paddle.square(anchor - negative)
     a_p = paddle.sum(a_p, axis=1)
     a_n = paddle.sum(a_n, axis=1)
     loss = F.relu(a_p + self.margin - a_n)
     return loss
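The final lines are the standard triplet hinge max(0, ||a − p||² + margin − ||a − n||²), which is zero once the negative is already margin farther away than the positive. A tiny sketch of just that hinge with made-up squared distances:

import paddle
import paddle.nn.functional as F

margin = 0.3
a_p = paddle.to_tensor([0.1, 0.8])   # hypothetical squared anchor-positive distances
a_n = paddle.to_tensor([0.9, 0.2])   # hypothetical squared anchor-negative distances

print(F.relu(a_p + margin - a_n))    # [0.0, 0.9]: only the second triplet is penalized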
Example #9
    def forward(self, input, label):
        feature = input["features"]
        logits = input["logits"]

        dist = paddle.sum(paddle.square(
            (paddle.unsqueeze(feature, 1) - paddle.unsqueeze(feature, 0))),
                          axis=2)

        # convert label to one-hot
        label = paddle.flatten(label)
        n_class = logits.shape[1]
        label = paddle.nn.functional.one_hot(label, n_class).astype("float32")

        s = (paddle.matmul(label, label,
                           transpose_y=True) == 0).astype("float32")
        margin = 2 * feature.shape[1]
        Ld = (1 - s) / 2 * dist + s / 2 * (margin - dist).clip(min=0)
        Ld = Ld.mean()

        if self.multi_label:
            # multi-label classification loss
            Lc = (logits - label * logits +
                  ((1 + (-logits).exp()).log())).sum(axis=1).mean()
        else:
            # single-label classification loss
            Lc = (-paddle.nn.functional.softmax(logits).log() *
                  label).sum(axis=1).mean()

        return {"dshsdloss": Lc + Ld * self.alpha}
Example #10
def supervised_chi_loss(ret, batch, value, config):
    """Computes loss for direct chi angle supervision.

    Jumper et al. (2021) Suppl. Alg. 27 "torsionAngleLoss"

    Args:
        ret: Dictionary to write outputs into, needs to contain 'loss'.
        batch: Batch, needs to contain 'seq_mask', 'chi_mask', 'chi_angles'.
        value: Dictionary containing structure module output, needs to contain
            value['sidechains']['angles_sin_cos'] for angles and
            value['sidechains']['unnormalized_angles_sin_cos'] for unnormalized
            angles.
        config: Configuration of the loss; should contain 'chi_weight'
            (scales the torsion term) and 'angle_norm_weight' (scales the
            angle-norm term).
    """
    eps = 1e-6
    
    sequence_mask = batch['seq_mask']
    num_res = sequence_mask.shape[1]
    batch_size = sequence_mask.shape[0]
    chi_mask = batch['chi_mask']
    pred_angles = paddle.reshape(value['sidechains']['angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    pred_angles = pred_angles[:, :, :, 3:]

    residue_type_one_hot = paddle.nn.functional.one_hot(batch['aatype_index'], 
                            num_classes=residue_constants.restype_num + 1)
    chi_pi_periodic = paddle.einsum('nijk, nkl->nijl', residue_type_one_hot[:, None, ...], 
                            paddle.to_tensor(residue_constants.chi_pi_periodic)[None])

    sin_cos_true_chi = batch['chi_angles_sin_cos'][:, None, ...]

    # This is -1 if chi is pi-periodic and +1 if it's 2pi-periodic
    shifted_mask = (1 - 2 * chi_pi_periodic)[..., None]
    sin_cos_true_chi_shifted = shifted_mask * sin_cos_true_chi

    sq_chi_error = paddle.sum(squared_difference(sin_cos_true_chi, pred_angles), axis=-1)
    sq_chi_error_shifted = paddle.sum(squared_difference(sin_cos_true_chi_shifted, pred_angles), axis=-1)
    sq_chi_error = paddle.minimum(sq_chi_error, sq_chi_error_shifted)

    sq_chi_loss_tmp = []
    for i in range(batch_size):
        sq_chi_loss_i = utils.mask_mean(mask=paddle.unsqueeze(chi_mask[i], axis=0), value=sq_chi_error[i])
        sq_chi_loss_tmp.append(sq_chi_loss_i)
    sq_chi_loss = paddle.to_tensor(sq_chi_loss_tmp, stop_gradient=False)
    sq_chi_loss = paddle.squeeze(sq_chi_loss, axis=-1)
    ret['chi_loss'] = sq_chi_loss
    ret['loss'] += config.chi_weight * sq_chi_loss

    unnormed_angles = paddle.reshape(value['sidechains']['unnormalized_angles_sin_cos'], [batch_size, -1, num_res, 7, 2])
    angle_norm = paddle.sqrt(paddle.sum(paddle.square(unnormed_angles), axis=-1) + eps)
    norm_error = paddle.abs(angle_norm - 1.)
    angle_norm_loss_tmp = []
    for i in range(batch_size):
        angle_norm_loss_i = utils.mask_mean(mask=paddle.unsqueeze(sequence_mask[i], axis=[0,2]), value=norm_error[i])
        angle_norm_loss_tmp.append(angle_norm_loss_i)
    angle_norm_loss = paddle.to_tensor(angle_norm_loss_tmp, stop_gradient=False)
    angle_norm_loss = paddle.squeeze(angle_norm_loss, axis=-1)
    ret['angle_norm_loss'] = angle_norm_loss
    ret['loss'] += config.angle_norm_weight * angle_norm_loss
Example #11
    def forward(self, input, target=None):
        #normalization
        features = input["features"]
        features = self._nomalize(features)
        samples_each_class = self.samples_each_class
        rerange_index = paddle.to_tensor(self.rerange_index)

        # calc similarity matrix (pairwise squared distances)
        diffs = paddle.unsqueeze(features, axis=1) - paddle.unsqueeze(features,
                                                                      axis=0)
        similary_matrix = paddle.sum(paddle.square(diffs), axis=-1)

        #rerange
        tmp = paddle.reshape(similary_matrix, shape=[-1, 1])
        tmp = paddle.gather(tmp, index=rerange_index)
        similary_matrix = paddle.reshape(tmp, shape=[-1, self.batch_size])

        #split
        ignore, pos, neg = paddle.split(
            similary_matrix,
            num_or_sections=[1, samples_each_class - 1, -1],
            axis=1)
        ignore.stop_gradient = True

        hard_pos = paddle.max(pos)
        hard_neg = paddle.min(neg)

        loss = hard_pos + self.margin - hard_neg
        loss = paddle.nn.ReLU()(loss)
        return {"msmloss": loss}
Example #12
def magphase(x: Tensor) -> Tuple[Tensor, Tensor]:
    """Compute compext norm of a given tensor.
    Typically,the input tensor is the result of a complex Fourier transform.
    Parameters:
        x(Tensor): The input tensor of shape (..., 2).
    Returns:
        The tuple of magnitude and phase.

    Shape:
        x: the shape of x is arbitrary, with the shape of last axis being 2
        outputs: the shapes of magnitude and phase are both input.shape[:-1]

     Examples:

        .. code-block:: python

        import paddle
        import paddleaudio.functional as F
        x = paddle.randn((10, 10, 2))
        mag, phase = F.magphase(x)

    """
    if x.shape[-1] != 2:
        raise ParameterError(
            f'complex tensor must be of shape (..., 2), but received {x.shape} instead'
        )
    mag = paddle.sqrt(paddle.square(x).sum(axis=-1))
    x0 = x.reshape((-1, 2))
    phase = paddle.atan2(x0[:, 0], x0[:, 1])
    phase = phase.reshape(x.shape[:-1])

    return mag, phase
Example #13
def _weight_norm(v, g, dim):
    shape = v.shape
    ndims = len(shape)

    if dim == -1:
        v_normalized = v / (paddle.sqrt(paddle.sum(paddle.square(v))) + 1e-12)
    elif dim == 0:
        p_matrix = paddle.reshape(v, (shape[0], -1))
        v_normalized = F.l2_normalize(p_matrix, axis=1)
        v_normalized = paddle.reshape(v_normalized, shape)
    elif dim == ndims - 1:
        p_matrix = paddle.reshape(v, (-1, shape[-1]))
        v_normalized = F.l2_normalize(p_matrix, axis=0)
        v_normalized = paddle.reshape(v_normalized, shape)
    else:
        perm = list(range(ndims))
        perm[0] = dim
        perm[dim] = 0
        p_transposed = paddle.transpose(v, perm)
        transposed_shape = p_transposed.shape
        p_matrix = paddle.reshape(p_transposed, (p_transposed.shape[0], -1))
        v_normalized = F.l2_normalize(p_matrix, axis=1)
        v_normalized = paddle.reshape(v_normalized, transposed_shape)
        v_normalized = paddle.transpose(v_normalized, perm)
    weight = F.elementwise_mul(v_normalized,
                               g,
                               axis=dim if dim is not None else -1)
    return weight
Example #14
    def forward(self, input, target=None):
        """
        anchor and positive (should include label)
        """
        features = input["features"]
        reg_lambda = self.reg_lambda
        batch_size = features.shape[0]
        fea_dim = features.shape[1]
        num_class = batch_size // 2

        #reshape
        out_feas = paddle.reshape(features, shape=[-1, 2, fea_dim])
        anc_feas, pos_feas = paddle.split(out_feas, num_or_sections=2, axis=1)
        anc_feas = paddle.squeeze(anc_feas, axis=1)
        pos_feas = paddle.squeeze(pos_feas, axis=1)

        # get similarity matrix
        similarity_matrix = paddle.matmul(
            anc_feas, pos_feas, transpose_y=True)  #get similarity matrix
        sparse_labels = paddle.arange(0, num_class, dtype='int64')
        xentloss = paddle.nn.CrossEntropyLoss()(
            similarity_matrix, sparse_labels)  #by default: mean

        #l2 norm
        reg = paddle.mean(paddle.sum(paddle.square(features), axis=1))
        l2loss = 0.5 * reg_lambda * reg
        return {"npairsloss": xentloss + l2loss}
Example #15
 def forward(self, x: Tensor) -> Tensor:
     assert x.ndim == 2, (f'the input tensor must be 2d tensor, ' +
                          f'but received x.ndim={x.ndim}')
     noise = self.noise_reader()
     if self.random:
         snr = random.uniform(self.snr_low, self.snr_high)
     else:
         snr = self.snr_high
     signal_mag = paddle.sum(paddle.square(x), -1)
     noise_mag = paddle.sum(paddle.square(noise), -1)
     alpha = 10**(snr / 10) * noise_mag / (signal_mag + 1e-10)
     beta = 1.0
     factor = alpha + beta
     alpha = alpha / factor
     beta = beta / factor
     x = alpha.unsqueeze((1, )) * x + beta.unsqueeze((1, )) * noise
     return x
Example #16
 def squash(self, Z):
     """squash
     """
     vec_squared_norm = paddle.sum(paddle.square(Z), axis=-1, keepdim=True)
     scalar_factor = vec_squared_norm / \
         (1 + vec_squared_norm) / paddle.sqrt(vec_squared_norm + 1e-8)
     vec_squashed = scalar_factor * Z
     return vec_squashed
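The squash nonlinearity keeps the direction of Z but rescales its length to ||Z||² / (1 + ||Z||²), so long capsule vectors approach unit norm and short ones shrink toward zero. A brief check of that property on a single vector:

import paddle

Z = paddle.to_tensor([[3.0, 4.0]])                                  # norm 5
sq_norm = paddle.sum(paddle.square(Z), axis=-1, keepdim=True)       # 25
squashed = sq_norm / (1 + sq_norm) / paddle.sqrt(sq_norm + 1e-8) * Z

# norm of the squashed vector: 25 / 26 ≈ 0.9615
print(paddle.sqrt(paddle.sum(paddle.square(squashed), axis=-1)))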
Example #17
    def forward(self, inputs):

        fields_wise_embeds_list = inputs

        # MF module
        field_wise_vectors = paddle.concat(
            [
                paddle.sum(fields_i_vectors, axis=1, keepdim=True)
                for fields_i_vectors in fields_wise_embeds_list
            ],
            1)

        left = []
        right = []

        for i, j in itertools.combinations(list(range(self.num_fields)), 2):
            left.append(i)
            right.append(j)

        left = paddle.to_tensor(left)
        right = paddle.to_tensor(right)

        embeddings_left = paddle.gather(field_wise_vectors, index=left, axis=1)
        embeddings_right = paddle.gather(
            field_wise_vectors, index=right, axis=1)

        embeddings_prod = paddle.multiply(embeddings_left, embeddings_right)
        field_weighted_embedding = paddle.multiply(embeddings_prod,
                                                   self.kernel_mf)
        h_mf = paddle.sum(field_weighted_embedding, axis=1)

        if self.use_bias:
            h_mf = h_mf + self.bias_mf

        # FM module
        square_of_sum_list = [
            paddle.square(paddle.sum(field_i_vectors, axis=1, keepdim=True))
            for field_i_vectors in fields_wise_embeds_list
        ]

        sum_of_square_list = [
            paddle.sum(paddle.multiply(field_i_vectors, field_i_vectors),
                       axis=1,
                       keepdim=True)
            for field_i_vectors in fields_wise_embeds_list
        ]

        field_fm = paddle.concat([
            square_of_sum - sum_of_square for square_of_sum, sum_of_square in
            zip(square_of_sum_list, sum_of_square_list)
        ], 1)
        h_fm = paddle.sum(paddle.multiply(field_fm, self.kernel_fm), axis=1)

        if self.use_bias:
            h_fm = h_fm + self.bias_fm

        return h_mf
Example #18
 def forward(self, x):
     """
     Args:
         x(tensor): (-1, 1).
     Returns:
         y(tensor): (-1, n_centers)
     """
     x = paddle.reshape(x, [-1, 1])
     return paddle.exp(-self.gamma * paddle.square(x - self.centers))
Example #19
    def calc_kl_dvg(self, means, logvars):
        """
        Compute the KL divergence between the Gaussian defined by (means, logvars)
        and the standard normal distribution.
        """
        kl_cost = -0.5 * (logvars - paddle.square(means) -
                          paddle.exp(logvars) + 1.0)
        kl_cost = paddle.mean(kl_cost, 0)

        return paddle.sum(kl_cost)
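This is the closed-form KL divergence of a diagonal Gaussian N(μ, exp(logvar)) against a standard normal, KL = −0.5 · Σ (1 + logvar − μ² − exp(logvar)), with a mean over the batch axis before the sum over dimensions. A small sketch of the same formula on dummy values:

import paddle

means = paddle.to_tensor([[0.0, 1.0]])
logvars = paddle.to_tensor([[0.0, 0.0]])   # unit variance

kl = -0.5 * (logvars - paddle.square(means) - paddle.exp(logvars) + 1.0)
print(paddle.sum(kl))   # 0.5: the first dim contributes 0, the second mu^2 / 2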
Example #20
 def forward(self, x: Tensor) -> Tensor:
     fft_signal = self._stft(x)
     spectrogram = paddle.square(fft_signal).sum(-1)
     if self.power == 2.0:
         pass
     elif self.power == 1.0:
         spectrogram = paddle.sqrt(spectrogram)
     else:
         spectrogram = spectrogram**(self.power / 2.0)
     return spectrogram
Example #21
 def create_loss(self, prediction, config):
     pre, pos_r, q_emb, p_emb, H_i_emb = prediction
     weight = config.get('hyper_parameters.negative_weight', 0.5)
     loss = weight * paddle.sum(
         paddle.sum(
             paddle.sum(paddle.einsum('ab,ac->abc', q_emb, q_emb), 0) *
             paddle.sum(paddle.einsum('ab,ac->abc', p_emb, p_emb), 0) *
             paddle.matmul(H_i_emb, H_i_emb, transpose_y=True), 0), 0)
     loss += paddle.sum((1.0 - weight) * paddle.square(pos_r) - 2.0 * pos_r)
     return loss
Example #22
    def __call__(self, predicts, batch):
        assert isinstance(predicts, (list, tuple))
        features, predicts = predicts

        feats_reshape = paddle.reshape(
            features, [-1, features.shape[-1]]).astype("float64")
        label = paddle.argmax(predicts, axis=2)
        label = paddle.reshape(label, [label.shape[0] * label.shape[1]])

        batch_size = feats_reshape.shape[0]

        #calc l2 distance between feats and centers
        square_feat = paddle.sum(paddle.square(feats_reshape),
                                 axis=1,
                                 keepdim=True)
        square_feat = paddle.expand(square_feat,
                                    [batch_size, self.num_classes])

        square_center = paddle.sum(paddle.square(self.centers),
                                   axis=1,
                                   keepdim=True)
        square_center = paddle.expand(
            square_center, [self.num_classes, batch_size]).astype("float64")
        square_center = paddle.transpose(square_center, [1, 0])

        distmat = paddle.add(square_feat, square_center)
        feat_dot_center = paddle.matmul(feats_reshape,
                                        paddle.transpose(self.centers, [1, 0]))
        distmat = distmat - 2.0 * feat_dot_center

        #generate the mask
        classes = paddle.arange(self.num_classes).astype("int64")
        label = paddle.expand(paddle.unsqueeze(label, 1),
                              (batch_size, self.num_classes))
        mask = paddle.equal(
            paddle.expand(classes, [batch_size, self.num_classes]),
            label).astype("float64")
        dist = paddle.multiply(distmat, mask)

        loss = paddle.sum(paddle.clip(dist, min=1e-12, max=1e+12)) / batch_size
        return {'loss_center': loss}
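The distance matrix here uses the expansion ||x − c||² = ||x||² + ||c||² − 2 · x · cᵀ instead of forming explicit differences, which avoids a (batch, num_classes, dim) intermediate. A compact sketch (hypothetical shapes) checking the expansion:

import paddle

x = paddle.randn([4, 8])     # hypothetical features
c = paddle.randn([10, 8])    # hypothetical class centers

expanded = (paddle.sum(paddle.square(x), axis=1, keepdim=True)
            + paddle.sum(paddle.square(c), axis=1).unsqueeze(0)
            - 2.0 * paddle.matmul(x, c, transpose_y=True))
direct = paddle.sum(paddle.square(x.unsqueeze(1) - c.unsqueeze(0)), axis=2)

print(paddle.allclose(expanded, direct, atol=1e-5))  # True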
Example #23
    def _test(self, run_npu=True):
        main_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        main_prog.random_seed = SEED
        startup_prog.random_seed = SEED
        np.random.seed(SEED)

        a_np = np.random.random(size=(32, 32)).astype('float32')
        b_np = np.random.random(size=(32, 32)).astype('float32')
        label_np = np.random.randint(2, size=(32, 1)).astype('int64')

        with paddle.static.program_guard(main_prog, startup_prog):
            a = paddle.static.data(name="a", shape=[32, 32], dtype='float32')
            b = paddle.static.data(name="b", shape=[32, 32], dtype='float32')
            label = paddle.static.data(name="label",
                                       shape=[32, 1],
                                       dtype='int64')

            c = paddle.multiply(a, b)
            d = paddle.square(c)

            fc_1 = fluid.layers.fc(input=d, size=128)
            prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')

            cost = fluid.layers.cross_entropy(input=prediction, label=label)
            loss = fluid.layers.reduce_mean(cost)
            sgd = fluid.optimizer.SGD(learning_rate=0.01)
            sgd.minimize(loss)

        if run_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)

        print("Start run on {}".format(place))
        for epoch in range(100):

            pred_res, loss_res = exe.run(main_prog,
                                         feed={
                                             "a": a_np,
                                             "b": b_np,
                                             "label": label_np
                                         },
                                         fetch_list=[prediction, loss])
            if epoch % 10 == 0:
                print("Epoch {} | Prediction[0]: {}, Loss: {}".format(
                    epoch, pred_res[0], loss_res))

        return pred_res, loss_res
Example #24
    def forward(self, dnn_logits, first_order, combined_features):
        """
        first_order: FM first order (batch_size, 1)
        combined_features: FM sparse features (batch_size, sparse_field_num + 1, embedding_size)
        """
        # sum square part
        # (batch_size, embedding_size)
        summed_features_emb = paddle.sum(combined_features, axis=1)
        summed_features_emb_square = paddle.square(summed_features_emb)

        # square sum part
        squared_features_emb = paddle.square(combined_features)

        # (batch_size, embedding_size)
        squared_sum_features_emb = paddle.sum(squared_features_emb, axis=1)

        # (batch_size, 1)
        logits = first_order + 0.5 * paddle.sum(
            summed_features_emb_square - squared_sum_features_emb,
            axis=1,
            keepdim=True) + self.bias + dnn_logits
        return fun.sigmoid(logits)
Example #25
    def infer_forward(self, dy_model, metrics_list, batch_data, config):
        out, src = self.create_feeds(batch_data, config)
        prediction = dy_model.forward(src)

        idx = out.numpy().nonzero()
        SE = paddle.square(prediction[idx] - out[idx]).sum()
        num = np.count_nonzero(out)
        # print_dict format: {'SE': SE, 'num': num}
        print_dict = {
            'SE': SE,
            'num': num,
        }
        return metrics_list, print_dict
Example #26
    def build_P_hat_paddle(self, C, P):
        F = self.F
        eps = self.eps
        n = P.shape[0]
        P_tile = paddle.tile(paddle.unsqueeze(P, axis=1), (1, F, 1))
        C_tile = paddle.unsqueeze(C, axis=0)
        P_diff = P_tile - C_tile
        rbf_norm = paddle.norm(P_diff, p=2, axis=2, keepdim=False)

        rbf = paddle.multiply(paddle.square(rbf_norm),
                              paddle.log(rbf_norm + eps))
        P_hat = paddle.concat([paddle.ones((n, 1)), P, rbf], axis=1)
        return P_hat
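The rbf term is the thin-plate spline radial basis U(r) = r² · log r (with eps guarding log 0), evaluated between the sampled grid points P and the fiducial control points C. A minimal sketch of the kernel on a few radii:

import paddle

eps = 1e-6
r = paddle.to_tensor([0.0, 0.5, 1.0, 2.0])
print(paddle.square(r) * paddle.log(r + eps))   # ≈ [0, -0.173, 0, 2.77]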
Example #27
    def forward(self, input, label):
        label.stop_gradient = True

        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        if not self.training or label is None:
            return cos

        cos_m = cos - self.margin

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, cos_m) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
Example #28
    def __call__(self, input, target):
        """
        input: network output: {"features": xxx, "logits": xxx}
        target: image label
        """
        feats = input["features"]
        labels = target
        batch_size = feats.shape[0]

        # squared L2 norm of each feature
        dist1 = paddle.sum(paddle.square(feats), axis=1, keepdim=True)
        dist1 = paddle.expand(dist1, [batch_size, self.num_classes])

        #dist2 of centers
        dist2 = paddle.sum(paddle.square(self.centers), axis=1,
                           keepdim=True)  #num_classes
        dist2 = paddle.expand(dist2,
                              [self.num_classes, batch_size]).astype("float64")
        dist2 = paddle.transpose(dist2, [1, 0])

        # the ||x||^2 + ||c||^2 part of the squared distance
        distmat = paddle.add(dist1, dist2)
        tmp = paddle.matmul(feats, paddle.transpose(self.centers, [1, 0]))
        distmat = distmat - 2.0 * tmp

        #generate the mask
        classes = paddle.arange(self.num_classes).astype("int64")
        labels = paddle.expand(paddle.unsqueeze(labels, 1),
                               (batch_size, self.num_classes))
        mask = paddle.equal(
            paddle.expand(classes, [batch_size, self.num_classes]),
            labels).astype("float64")  #get mask

        dist = paddle.multiply(distmat, mask)
        loss = paddle.sum(paddle.clip(dist, min=1e-12, max=1e+12)) / batch_size

        return {'CenterLoss': loss}
Example #29
    def forward(self, input, label):
        # norm input
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)  # support broadcast
        # norm weight
        weight = self.fc0.weight
        w_square = paddle.square(weight)  #[512,2500]
        w_sum = paddle.sum(w_square, axis=0, keepdim=True)  #[1,2500]
        weight_norm = paddle.sqrt(w_sum)
        weight = paddle.divide(weight, weight_norm)

        # # norm input
        # input = paddle.fluid.layers.l2_normalize(input,axis =-1)
        # # norm weight
        # weight = paddle.fluid.layers.l2_normalize(self.fc0.weight,axis =-1)

        # get cos(theta)
        cos = paddle.matmul(input, weight)
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m
        # if use easy_margin
        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)
        # use label
        one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
Example #30
    def build_P_hat_paddle(self, C, P):
        F = self.F
        eps = self.eps
        n = P.shape[0]  # n (= self.I_r_width x self.I_r_height)
        # P_tile: n x 2 -> n x 1 x 2 -> n x F x 2
        P_tile = paddle.tile(paddle.unsqueeze(P, axis=1), (1, F, 1))
        C_tile = paddle.unsqueeze(C, axis=0)  # 1 x F x 2
        P_diff = P_tile - C_tile  # n x F x 2
        # rbf_norm: n x F
        rbf_norm = paddle.norm(P_diff, p=2, axis=2, keepdim=False)

        # rbf: n x F
        rbf = paddle.multiply(paddle.square(rbf_norm),
                              paddle.log(rbf_norm + eps))
        P_hat = paddle.concat([paddle.ones((n, 1)), P, rbf], axis=1)
        return P_hat  # n x F+3