Code example #1
File: circlemargin.py  Project: lvjian0706/PaddleClas
    def forward(self, input, label):
        feat_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, feat_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        logits = paddle.matmul(input, weight)
        if not self.training or label is None:
            return logits

        alpha_p = paddle.clip(-logits.detach() + 1 + self.margin, min=0.)
        alpha_n = paddle.clip(logits.detach() + self.margin, min=0.)
        delta_p = 1 - self.margin
        delta_n = self.margin

        m_hot = F.one_hot(label.reshape([-1]), num_classes=logits.shape[1])

        logits_p = alpha_p * (logits - delta_p)
        logits_n = alpha_n * (logits - delta_n)
        pre_logits = logits_p * m_hot + logits_n * (1 - m_hot)
        pre_logits = self.scale * pre_logits

        return pre_logits
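The forward pass above first L2-normalizes both the features and the classifier weight so the matmul yields cosine similarities, then applies the Circle-loss style margins. A minimal, self-contained sketch of just the normalization step (shapes and variable names are illustrative, not taken from the original class):

import paddle

feat = paddle.randn([4, 8])      # batch of 4 embeddings, 8-dim (made-up shapes)
weight = paddle.randn([8, 10])   # 10 classes

feat_norm = paddle.sqrt(paddle.sum(paddle.square(feat), axis=1, keepdim=True))
feat = paddle.divide(feat, feat_norm)        # each row now has unit L2 norm

weight_norm = paddle.sqrt(paddle.sum(paddle.square(weight), axis=0, keepdim=True))
weight = paddle.divide(weight, weight_norm)  # each column now has unit L2 norm

logits = paddle.matmul(feat, weight)         # entries are cosines in [-1, 1]
print(logits.shape)                          # [4, 10]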
Code example #2
    def forward(self, true_binary, rule_masks, raw_logits):
        """
        tbd
        """
        if cmd_args.loss_type == 'binary':
            exp_pred = paddle.exp(raw_logits) * rule_masks

            norm = paddle.sum(exp_pred, axis=2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            return F.binary_cross_entropy(
                prob, true_binary) * cmd_args.max_decode_steps

        if cmd_args.loss_type == 'perplexity':
            my_perp_loss = MyPerpLoss()
            return my_perp_loss(true_binary, rule_masks, raw_logits)

        if cmd_args.loss_type == 'vanilla':
            exp_pred = paddle.exp(raw_logits) * rule_masks + 1e-30
            norm = paddle.sum(exp_pred, 2, keepdim=True)
            prob = paddle.divide(exp_pred, norm)

            ll = paddle.abs(paddle.sum(true_binary * prob, 2))
            mask = 1 - rule_masks[:, :, -1]
            logll = mask * paddle.log(ll)

            loss = -paddle.sum(logll) / true_binary.shape[1]

            return loss
        raise NotImplementedError('unknown loss type %s' % cmd_args.loss_type)
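A minimal sketch of the masked softmax used in the 'vanilla' branch above, with made-up tensor shapes; the 1e-30 term keeps rows whose rules are all masked out from dividing by zero:

import paddle

logits = paddle.randn([2, 5, 7])   # (batch, steps, rules), illustrative only
masks = paddle.cast(paddle.randn([2, 5, 7]) > 0, 'float32')

exp_pred = paddle.exp(logits) * masks + 1e-30    # zero out invalid rules
norm = paddle.sum(exp_pred, axis=2, keepdim=True)
prob = paddle.divide(exp_pred, norm)             # rows sum to 1 over unmasked entries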
Code example #3
File: arcmargin.py  Project: lvjian0706/PaddleClas
    def forward(self, input, label=None):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        if not self.training or label is None:
            return cos
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m

        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
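The phi computation above is just the angle-addition identity cos(theta + m) = cos(theta)*cos(m) - sin(theta)*sin(m), i.e. ArcFace adds the margin m to the angle rather than to the cosine. A quick numeric check (values are arbitrary):

import math

theta, m = 0.7, 0.5                            # arbitrary angle and margin
phi = math.cos(theta) * math.cos(m) - math.sin(theta) * math.sin(m)
assert abs(phi - math.cos(theta + m)) < 1e-12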
Code example #4
 def build_P_paddle(self, I_r_size):
     I_r_width, I_r_height = I_r_size
     I_r_grid_x = paddle.divide(
         (paddle.arange(-I_r_width, I_r_width, 2).astype('float32') + 1.0),
         paddle.to_tensor(I_r_width).astype('float32'))
     I_r_grid_y = paddle.divide(
         (paddle.arange(-I_r_height, I_r_height, 2).astype('float32') +
          1.0),
         paddle.to_tensor(I_r_height).astype('float32'))
     P = paddle.stack(paddle.meshgrid(I_r_grid_x, I_r_grid_y), axis=2)
     P = paddle.transpose(P, perm=[1, 0, 2])
     return P.reshape([-1, 2])
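build_P_paddle returns a flattened (height*width, 2) grid of x/y coordinates normalized into (-1, 1). A standalone re-derivation with made-up sizes, useful for checking the shapes:

import paddle

W, H = 4, 3                                    # illustrative feature-map size
gx = paddle.divide(paddle.arange(-W, W, 2).astype('float32') + 1.0,
                   paddle.to_tensor(float(W)))
gy = paddle.divide(paddle.arange(-H, H, 2).astype('float32') + 1.0,
                   paddle.to_tensor(float(H)))
P = paddle.stack(paddle.meshgrid(gx, gy), axis=2)
P = paddle.transpose(P, perm=[1, 0, 2]).reshape([-1, 2])
print(P.shape)                                 # [12, 2]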
Code example #5
File: base_model.py  Project: GuoxiaWang/PLSC
    def _margin_softmax(input, label, out_dim, param_attr, margin1, margin2,
                        margin3, scale, sample_ratio):
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        if param_attr is None:
            param_attr = paddle.ParamAttr(
                initializer=paddle.nn.initializer.XavierNormal(fan_in=0.0))
        weight = paddle.static.create_parameter(
            shape=[input.shape[1], out_dim],
            dtype='float32',
            name=unique_name.generate('final_fc_w'),
            attr=param_attr)

        if sample_ratio < 1.0:
            # partial fc sample process
            label, sampled_class_index = class_center_sample(
                label, out_dim, ratio=sample_ratio, ignore_label=-1)
            sampled_class_index.stop_gradient = True
            weight = paddle.gather(weight, sampled_class_index, axis=1)
            out_dim = paddle.shape(sampled_class_index)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(weight), axis=0, keepdim=True))
        weight = paddle.divide(weight, weight_norm)
        cos = paddle.matmul(input, weight)

        theta = paddle.acos(cos)
        if margin1 != 1.0:
            theta = margin1 * theta
        if margin2 != 0.0:
            theta = theta + margin2
        margin_cos = paddle.cos(theta)
        if margin3 != 0.0:
            margin_cos = margin_cos - margin3

        one_hot = paddle.nn.functional.one_hot(label, num_classes=out_dim)
        diff = paddle.multiply(paddle.subtract(margin_cos, cos), one_hot)
        target_cos = paddle.add(cos, diff)
        logit = paddle.scale(target_cos, scale=scale)

        loss, prob = paddle.nn.functional.softmax_with_cross_entropy(
            logits=logit,
            label=paddle.reshape(label, (-1, 1)),
            return_softmax=True)
        avg_loss = paddle.mean(x=loss)

        one_hot.stop_gradient = True

        return avg_loss, prob
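The three margins above implement the combined-margin logit cos(m1*theta + m2) - m3; settings such as (1.0, 0.5, 0.0) correspond to an ArcFace-style margin and (1.0, 0.0, 0.35) to a CosFace-style margin. A scalar evaluation of that formula (angle and margin values are arbitrary):

import math

def margin_cos(theta, m1, m2, m3):
    return math.cos(m1 * theta + m2) - m3

theta = 0.9                                 # angle between feature and class center
print(margin_cos(theta, 1.0, 0.5, 0.0))    # ArcFace-style margin
print(margin_cos(theta, 1.0, 0.0, 0.35))   # CosFace-style margin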
Code example #6
    def __measure_parameterless(self, state, which_qubits, result_desired):
        r"""进行 01 测量。

        Args:
            state (Tensor): 输入的量子态
            which_qubits (list): 测量作用的量子比特编号
            result_desired (str): 期望得到的测量结果

        Returns:
            Tensor: 测量坍塌后的量子态
            Tensor:测量坍塌得到的概率
            str: 测量得到的结果
        """
        n = self.get_qubit_number()
        assert len(which_qubits) == len(result_desired), \
            "the number of qubits to measure must match the length of the desired result"
        op_list = [np.eye(2, dtype=np.complex128)] * n
        for i, ele in zip(which_qubits, result_desired):
            k = int(ele)
            rho = np.zeros((2, 2), dtype=np.complex128)
            rho[int(k), int(k)] = 1
            op_list[i] = rho
        if n > 1:
            measure_operator = paddle.to_tensor(NKron(*op_list))
        else:
            measure_operator = paddle.to_tensor(op_list[0])
        state_measured = matmul(matmul(measure_operator, state),
                                dagger(measure_operator))
        prob = real(
            trace(
                matmul(matmul(dagger(measure_operator), measure_operator),
                       state)))
        state_measured = divide(state_measured, prob)
        return state_measured, prob, result_desired
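For a density matrix rho and projector P_k, the probability of outcome k is tr(P_k† P_k rho) and the collapsed state is P_k rho P_k† / prob, which is exactly what the method computes. A one-qubit numpy sketch of the same project-and-renormalize step:

import numpy as np

rho = np.array([[0.75, 0.0], [0.0, 0.25]], dtype=np.complex128)  # example state
P0 = np.array([[1, 0], [0, 0]], dtype=np.complex128)             # projector onto |0>
prob = np.real(np.trace(P0.conj().T @ P0 @ rho))
state_measured = (P0 @ rho @ P0.conj().T) / prob
print(prob)                                                      # 0.75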
Code example #7
    def sqrt_newton_schulz_autograd(self, A, numIters):
        A_shape = A.shape
        batchSize = A_shape[0]
        dim = A_shape[1]

        normA = A * A
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sum(normA, axis=1)
        normA = paddle.sqrt(normA)
        normA1 = normA.reshape([batchSize, 1, 1])
        Y = paddle.divide(A, paddle.expand_as(normA1, A))
        I = paddle.eye(dim, dim).reshape([1, dim, dim])
        l0 = []
        for i in range(batchSize):
            l0.append(I)
        I = paddle.concat(l0, axis=0)
        I.stop_gradient = False
        Z = paddle.eye(dim, dim).reshape([1, dim, dim])
        l1 = []
        for i in range(batchSize):
            l1.append(Z)
        Z = paddle.concat(l1, axis=0)
        Z.stop_gradient = False

        for i in range(numIters):
            T = 0.5 * (3.0 * I - Z.bmm(Y))
            Y = Y.bmm(T)
            Z = T.bmm(Z)
        sA = Y * paddle.sqrt(normA1).reshape([batchSize, 1, 1])
        sA = paddle.expand_as(sA, A)
        return sA
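The Newton-Schulz iteration converges to the matrix square root only when the input is scaled so that ||I - A|| < 1, which dividing by the Frobenius norm guarantees for positive-definite inputs; the norm's square root is multiplied back at the end. A standalone check on one SPD matrix (a minimal sketch, not the original class method):

import paddle

A = paddle.to_tensor([[[4.0, 0.0], [0.0, 9.0]]])    # batch of one SPD 2x2 matrix
normA = paddle.sqrt(paddle.sum(A * A, axis=[1, 2], keepdim=True))
Y = A / normA
I = paddle.eye(2).reshape([1, 2, 2])
Z = paddle.eye(2).reshape([1, 2, 2])
for _ in range(10):
    T = 0.5 * (3.0 * I - paddle.bmm(Z, Y))
    Y = paddle.bmm(Y, T)
    Z = paddle.bmm(T, Z)
sA = Y * paddle.sqrt(normA)
print(sA.numpy())                                   # approx [[2, 0], [0, 3]]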
Code example #8
    def forward(self, x):
        residual = x

        t = self.t(x)
        p = self.p(x)
        g = self.g(x)

        b, c, h, w = t.shape

        t = paddle.transpose(paddle.reshape(t, (b, c, -1)), (0, 2, 1))
        p = paddle.reshape(p, (b, c, -1))
        g = paddle.transpose(paddle.reshape(g, (b, c, -1)), (0, 2, 1))

        att = paddle.bmm(t, p)

        if self.use_scale:
            att = paddle.divide(att, paddle.to_tensor(c**0.5))

        att = self.softmax(att)
        x = paddle.bmm(att, g)

        x = paddle.transpose(x, (0, 2, 1))
        x = paddle.reshape(x, (b, c, h, w))

        x = self.z(x)
        x = self.bn(x) + residual

        return x
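The use_scale branch divides the attention logits by sqrt(c), the same scaling used in scaled dot-product attention to keep logit magnitudes independent of the channel count. A minimal sketch of that scaled attention step with illustrative shapes:

import paddle

b, c, n = 1, 16, 32                       # batch, channels, spatial positions (made up)
t = paddle.randn([b, n, c])
p = paddle.randn([b, c, n])
att = paddle.bmm(t, p)                    # [b, n, n] attention logits
att = paddle.divide(att, paddle.to_tensor(float(c) ** 0.5))
att = paddle.nn.functional.softmax(att, axis=-1)
print(att.shape)                          # [1, 32, 32]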
Code example #9
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)
Code example #10
    def infer_net(self, inputs):
        def embedding_layer(input, table_name, initializer_instance=None):
            emb = paddle.static.nn.embedding(
                input=input,
                size=[self.sparse_feature_number, self.sparse_feature_dim],
                param_attr=table_name)
            return emb

        all_label = np.arange(self.sparse_feature_number).reshape(
            self.sparse_feature_number).astype('int32')
        self.all_label = paddle.cast(x=paddle.nn.functional.assign(all_label),
                                     dtype='int64')
        emb_all_label = embedding_layer(self.all_label, "emb")
        emb_a = embedding_layer(inputs[0], "emb")
        emb_b = embedding_layer(inputs[1], "emb")
        emb_c = embedding_layer(inputs[2], "emb")

        target = paddle.add(x=paddle.fluid.layers.nn.elementwise_sub(
            emb_b, emb_a),
                            y=emb_c)

        emb_all_label_l2 = paddle.fluid.layers.l2_normalize(x=emb_all_label,
                                                            axis=1)
        dist = paddle.fluid.layers.matmul(x=target,
                                          y=emb_all_label_l2,
                                          transpose_y=True)
        values, pred_idx = paddle.topk(x=dist, k=1)
        label = paddle.fluid.layers.expand(paddle.unsqueeze(inputs[3],
                                                            axis=[1]),
                                           expand_times=[1, 1])
        label_ones = paddle.fluid.layers.fill_constant_batch_size_like(
            label, shape=[-1, 1], value=1.0, dtype='float32')
        right_cnt = paddle.sum(
            x=paddle.cast(paddle.equal(x=pred_idx, y=label), dtype='float32'))
        total_cnt = paddle.sum(x=label_ones)

        global_right_cnt = paddle.fluid.layers.create_global_var(
            name="global_right_cnt",
            persistable=True,
            dtype='float32',
            shape=[1],
            value=0)
        global_total_cnt = paddle.fluid.layers.create_global_var(
            name="global_total_cnt",
            persistable=True,
            dtype='float32',
            shape=[1],
            value=0)
        global_right_cnt.stop_gradient = True
        global_total_cnt.stop_gradient = True

        tmp1 = paddle.add(x=right_cnt, y=global_right_cnt)
        paddle.nn.functional.assign(tmp1, global_right_cnt)
        tmp2 = paddle.add(x=total_cnt, y=global_total_cnt)
        paddle.nn.functional.assign(tmp2, global_total_cnt)

        acc = paddle.divide(x=global_right_cnt,
                            y=global_total_cnt,
                            name="total_acc")
        self._infer_results['acc'] = acc
Code example #11
    def __call__(self, hm, wh, reg, im_shape, scale_factor):
        heat = self._simple_nms(hm)
        scores, inds, clses, ys, xs = self._topk(heat)
        scores = paddle.tensor.unsqueeze(scores, [1])
        clses = paddle.tensor.unsqueeze(clses, [1])

        reg_t = paddle.transpose(reg, [0, 2, 3, 1])
        # Like TTFBox, batch size is 1.
        # TODO: support batch size > 1
        reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
        reg = paddle.gather(reg, inds)
        xs = paddle.cast(xs, 'float32')
        ys = paddle.cast(ys, 'float32')
        xs = xs + reg[:, 0:1]
        ys = ys + reg[:, 1:2]

        wh_t = paddle.transpose(wh, [0, 2, 3, 1])
        wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
        wh = paddle.gather(wh, inds)

        if self.regress_ltrb:
            x1 = xs - wh[:, 0:1]
            y1 = ys - wh[:, 1:2]
            x2 = xs + wh[:, 2:3]
            y2 = ys + wh[:, 3:4]
        else:
            x1 = xs - wh[:, 0:1] / 2
            y1 = ys - wh[:, 1:2] / 2
            x2 = xs + wh[:, 0:1] / 2
            y2 = ys + wh[:, 1:2] / 2

        n, c, feat_h, feat_w = hm.shape[:]
        padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
        padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
        x1 = x1 * self.down_ratio
        y1 = y1 * self.down_ratio
        x2 = x2 * self.down_ratio
        y2 = y2 * self.down_ratio

        x1 = x1 - padw
        y1 = y1 - padh
        x2 = x2 - padw
        y2 = y2 - padh

        bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
        scale_y = scale_factor[:, 0:1]
        scale_x = scale_factor[:, 1:2]
        scale_expand = paddle.concat([scale_x, scale_y, scale_x, scale_y],
                                     axis=1)
        boxes_shape = paddle.shape(bboxes)
        boxes_shape.stop_gradient = True
        scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
        bboxes = paddle.divide(bboxes, scale_expand)
        if self.for_mot:
            results = paddle.concat([bboxes, scores, clses], axis=1)
            return results, inds
        else:
            results = paddle.concat([clses, scores, bboxes], axis=1)
            return results, paddle.shape(results)[0:1]
Code example #12
File: train.py  Project: 965784749-rgb/PaddleRec
def get_acc(x, y, batch_size):
    less = paddle.cast(paddle.less_than(x, y), dtype='float32')
    label_ones = paddle.full(dtype='float32',
                             shape=[batch_size, 1],
                             fill_value=1.0)
    correct = paddle.sum(less)
    total = paddle.sum(label_ones)
    acc = paddle.divide(correct, total)
    return acc
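A hypothetical call of get_acc as defined above; x plays the role of the negative-pair scores and y the positive-pair scores, so the accuracy is the fraction of rows with x < y:

import paddle

x = paddle.to_tensor([[0.1], [0.9], [0.2], [0.4]])
y = paddle.to_tensor([[0.5], [0.8], [0.7], [0.6]])
print(get_acc(x, y, batch_size=4).numpy())   # 0.75: three of four rows have x < y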
Code example #13
    def __measure_parameterized(self, state, which_qubits, result_desired,
                                theta):
        r"""进行参数化的测量。

        Args:
            state (Tensor): 输入的量子态
            which_qubits (list): 测量作用的量子比特编号
            result_desired (str): 期望得到的测量结果
            theta (Tensor): 测量运算的参数

        Returns:
            Tensor: 测量坍塌后的量子态
            Tensor:测量坍塌得到的概率
            str: 测量得到的结果
        """
        n = self.get_qubit_number()
        assert len(which_qubits) == len(result_desired), \
            "the number of qubits to measure must match the length of the desired result"
        op_list = [paddle.to_tensor(np.eye(2, dtype=np.complex128))] * n
        for idx in range(0, len(which_qubits)):
            i = which_qubits[idx]
            ele = result_desired[idx]
            if int(ele) == 0:
                basis0 = paddle.to_tensor(
                    np.array([[1, 0], [0, 0]], dtype=np.complex128))
                basis1 = paddle.to_tensor(
                    np.array([[0, 0], [0, 1]], dtype=np.complex128))
                rho0 = multiply(basis0, cos(theta[idx]))
                rho1 = multiply(basis1, sin(theta[idx]))
                rho = add(rho0, rho1)
                op_list[i] = rho
            elif int(ele) == 1:
                # rho = diag(concat([cos(theta[idx]), sin(theta[idx])]))
                # rho = paddle.to_tensor(rho, zeros((2, 2), dtype="float64"))
                basis0 = paddle.to_tensor(
                    np.array([[1, 0], [0, 0]], dtype=np.complex128))
                basis1 = paddle.to_tensor(
                    np.array([[0, 0], [0, 1]], dtype=np.complex128))
                rho0 = multiply(basis0, sin(theta[idx]))
                rho1 = multiply(basis1, cos(theta[idx]))
                rho = add(rho0, rho1)
                op_list[i] = rho
            else:
                print("cannot recognize the result_desired.")
            # rho = paddle.to_tensor(ones((2, 2), dtype="float64"), zeros((2, 2), dtype="float64"))
        measure_operator = paddle.to_tensor(op_list[0])
        if n > 1:
            for idx in range(1, len(op_list)):
                measure_operator = kron(measure_operator, op_list[idx])
        state_measured = matmul(matmul(measure_operator, state),
                                dagger(measure_operator))
        prob = real(
            trace(
                matmul(matmul(dagger(measure_operator), measure_operator),
                       state)))
        state_measured = divide(state_measured, prob)
        return state_measured, prob, result_desired
Code example #14
    def __call__(self, predicts, batch):
        if isinstance(predicts, (list, tuple)):
            predicts = predicts[-1]

        B, N = predicts.shape[:2]
        div = paddle.to_tensor([N]).astype('float32')

        predicts = nn.functional.softmax(predicts, axis=-1)
        aggregation_preds = paddle.sum(predicts, axis=1)
        aggregation_preds = paddle.divide(aggregation_preds, div)

        length = batch[2].astype("float32")
        batch = batch[3].astype("float32")
        batch[:, 0] = paddle.subtract(div, length)
        batch = paddle.divide(batch, div)

        loss = self.loss_func(aggregation_preds, batch)
        return {"loss_ace": loss}
Code example #15
 def test_dygraph(self):
     with fluid.dygraph.guard():
         np_x = np.array([2, 3, 4]).astype('float64')
         np_y = np.array([1, 5, 2]).astype('float64')
         x = paddle.to_tensor(np_x)
         y = paddle.to_tensor(np_y)
         z = paddle.divide(x, y)
         np_z = z.numpy()
         z_expected = np.array([2., 0.6, 2.])
         self.assertEqual((np_z == z_expected).all(), True)
Code example #16
    def forward(self, similarities_matrix, query_img_id, gallery_img_id,
                keep_mask):
        metric_dict = dict()

        choosen_indices = paddle.argsort(similarities_matrix,
                                         axis=1,
                                         descending=True)
        gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
        gallery_labels_transpose = paddle.broadcast_to(
            gallery_labels_transpose,
            shape=[
                choosen_indices.shape[0], gallery_labels_transpose.shape[1]
            ])
        choosen_label = paddle.index_sample(gallery_labels_transpose,
                                            choosen_indices)
        equal_flag = paddle.equal(choosen_label, query_img_id)
        if keep_mask is not None:
            keep_mask = paddle.index_sample(keep_mask.astype('float32'),
                                            choosen_indices)
            equal_flag = paddle.logical_and(equal_flag,
                                            keep_mask.astype('bool'))
        equal_flag = paddle.cast(equal_flag, 'float32')

        num_rel = paddle.sum(equal_flag, axis=1)
        num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.))
        num_rel_index = paddle.nonzero(num_rel.astype("int"))
        num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]])
        equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0)

        # find the rank of the hardest (i.e. last) correct match
        div = paddle.arange(equal_flag.shape[1]).astype("float32") + 2
        minus = paddle.divide(equal_flag, div)
        auxilary = paddle.subtract(equal_flag, minus)
        hard_index = paddle.argmax(auxilary, axis=1).astype("float32")
        all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)
        mINP = paddle.mean(all_INP)
        metric_dict["mINP"] = mINP.numpy()[0]
        return metric_dict
Code example #17
 def _dice_loss(self, input, target):
     input = fluid.layers.reshape(input,
                                  shape=(fluid.layers.shape(input)[0], -1))
     target = fluid.layers.reshape(target,
                                   shape=(fluid.layers.shape(target)[0],
                                          -1))
     target = fluid.layers.cast(target, 'float32')
     a = fluid.layers.reduce_sum(paddle.multiply(input, target), dim=1)
     b = fluid.layers.reduce_sum(paddle.multiply(input, input),
                                 dim=1) + 0.001
     c = fluid.layers.reduce_sum(paddle.multiply(target, target),
                                 dim=1) + 0.001
     d = paddle.divide((2 * a), paddle.add(b, c))
     return 1 - d
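This is the soft Dice coefficient, dice = 2*sum(p*t) / (sum(p*p) + sum(t*t)), with 0.001 added to each denominator term for numerical stability; the loss is 1 - dice. A small numeric check in plain numpy:

import numpy as np

p = np.array([0.9, 0.1, 0.8])   # predicted probabilities (illustrative)
t = np.array([1.0, 0.0, 1.0])   # binary target
dice = 2 * np.sum(p * t) / (np.sum(p * p) + np.sum(t * t) + 0.002)
print(1 - dice)                 # ~0.018: near-perfect prediction, small loss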
Code example #18
File: crops.py  Project: hysunflower/PaddleSeg
    def inv_transform(self, prob_map):
        if self._counts is None:
            return prob_map

        new_prob_map = paddle.zeros((1, 1, *self._counts.shape), dtype=prob_map.dtype)

        crop_indx = 0
        for dy in self.y_offsets:
            for dx in self.x_offsets:
                new_prob_map[0, 0, dy:dy + self.crop_height, dx:dx + self.crop_width] += prob_map[crop_indx, 0]
                crop_indx += 1
        new_prob_map = paddle.divide(new_prob_map, self._counts)

        return new_prob_map
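inv_transform accumulates the per-crop probability maps back onto the full-image canvas and divides by self._counts, i.e. it averages the predictions wherever crops overlap. A 1-D toy version of the same accumulate-then-divide pattern (all names and sizes are made up):

import numpy as np
import paddle

acc = np.zeros(6, dtype='float32')
counts = np.zeros(6, dtype='float32')
for start, vals in [(0, np.ones(4, 'float32')), (2, 3 * np.ones(4, 'float32'))]:
    acc[start:start + 4] += vals
    counts[start:start + 4] += 1
avg = paddle.divide(paddle.to_tensor(acc), paddle.to_tensor(counts))
print(avg.numpy())              # [1. 1. 2. 2. 3. 3.]: overlapping positions averaged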
Code example #19
File: cosmargin.py  Project: lvjian0706/PaddleClas
    def forward(self, input, label):
        label.stop_gradient = True

        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)

        weight_norm = paddle.sqrt(
            paddle.sum(paddle.square(self.weight), axis=0, keepdim=True))
        weight = paddle.divide(self.weight, weight_norm)

        cos = paddle.matmul(input, weight)
        if not self.training or label is None:
            return cos

        cos_m = cos - self.margin

        one_hot = paddle.nn.functional.one_hot(label, self.class_num)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, cos_m) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
Code example #20
    def forward(self, input, label):
        # norm input
        input_norm = paddle.sqrt(
            paddle.sum(paddle.square(input), axis=1, keepdim=True))
        input = paddle.divide(input, input_norm)  # support broadcast
        # norm weight
        weight = self.fc0.weight
        w_square = paddle.square(weight)  #[512,2500]
        w_sum = paddle.sum(w_square, axis=0, keepdim=True)  #[1,2500]
        weight_norm = paddle.sqrt(w_sum)
        weight = paddle.divide(weight, weight_norm)

        # # norm input
        # input = paddle.fluid.layers.l2_normalize(input,axis =-1)
        # # norm weight
        # weight = paddle.fluid.layers.l2_normalize(self.fc0.weight,axis =-1)

        # get cos(theta)
        cos = paddle.matmul(input, weight)
        sin = paddle.sqrt(1.0 - paddle.square(cos) + 1e-6)
        cos_m = math.cos(self.margin)
        sin_m = math.sin(self.margin)
        phi = cos * cos_m - sin * sin_m
        # if use easy_margin
        th = math.cos(self.margin) * (-1)
        mm = math.sin(self.margin) * self.margin
        if self.easy_margin:
            phi = self._paddle_where_more_than(cos, 0, phi, cos)
        else:
            phi = self._paddle_where_more_than(cos, th, phi, cos - mm)
        # use label
        one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
        one_hot = paddle.squeeze(one_hot, axis=[1])
        output = paddle.multiply(one_hot, phi) + paddle.multiply(
            (1.0 - one_hot), cos)
        output = output * self.scale
        return output
Code example #21
File: losses.py  Project: geoyee/PdRSCD
 def forward(self, logit, label=None):
     N, fea_dim = logit.shape[:2]
     logit_norm = paddle.sqrt(paddle.sum(paddle.square(logit), axis=1)).reshape((N, 1, -1))
     logit = paddle.divide(logit, logit_norm)
     output = paddle.reshape(logit, shape=[-1, 3, fea_dim])
     anchor, positive, negative = paddle.split(output, num_or_sections=3, axis=1)
     anchor = paddle.reshape(anchor, shape=[-1, fea_dim])
     positive = paddle.reshape(positive, shape=[-1, fea_dim])
     negative = paddle.reshape(negative, shape=[-1, fea_dim])
     a_p = paddle.square(anchor - positive)
     a_n = paddle.square(anchor - negative)
     a_p = paddle.sum(a_p, axis=1)
     a_n = paddle.sum(a_n, axis=1)
     loss = F.relu(a_p + self.margin - a_n)
     return loss
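The returned value is the standard triplet loss max(0, d(a,p) + margin - d(a,n)) with squared Euclidean distances; note the snippet returns the per-triplet losses without reducing them. A tiny worked example (margin and vectors are arbitrary):

import paddle
import paddle.nn.functional as F

a = paddle.to_tensor([[1.0, 0.0]])
p = paddle.to_tensor([[0.9, 0.1]])
n = paddle.to_tensor([[0.0, 1.0]])
a_p = paddle.sum(paddle.square(a - p), axis=1)   # 0.02
a_n = paddle.sum(paddle.square(a - n), axis=1)   # 2.0
loss = F.relu(a_p + 0.5 - a_n)                   # max(0, 0.02 + 0.5 - 2.0) = 0
print(loss.numpy())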
Code example #22
    def transfer(self, source, reference, with_face=False):
        source_input, face, crop_face = self.preprocess(source)
        reference_input, face, crop_face = self.preprocess(reference)

        consis_mask = np.float32(
            calculate_consis_mask(source_input[1], reference_input[1]))
        consis_mask = paddle.to_tensor(np.expand_dims(consis_mask, 0))

        if not (source_input and reference_input):
            if with_face:
                return None, None
            return
        for i in range(len(source_input) - 1):
            source_input[i] = paddle.to_tensor(
                np.expand_dims(source_input[i], 0))

        for i in range(len(reference_input) - 1):
            reference_input[i] = paddle.to_tensor(
                np.expand_dims(reference_input[i], 0))

        input_data = {
            'image_A': source_input[0],
            'image_B': reference_input[0],
            'mask_A_aug': source_input[1],
            'mask_B_aug': reference_input[1],
            'P_A': source_input[2],
            'P_B': reference_input[2],
            'consis_mask': consis_mask
        }
        state_dicts = load(self.model_path)
        net = getattr(self.model, 'netG')
        net.set_dict(state_dicts['netG'])
        result, _ = self.model.test(input_data)
        print('result shape: ', result.shape)
        min_, max_ = result.min(), result.max()
        result += -min_
        result = paddle.divide(result, max_ - min_ + 1e-5)
        img = toImage(result)

        if with_face:
            return img, crop_face
        img.save('before.png')

        return img
Code example #23
    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.divide, x=x, y=np_y)

                # rule 2: both the inputs are not Tensor
                z = paddle.divide(3, 2)
                self.assertEqual(z.numpy()[0] == 1.5, True)

                # rule 3: both the inputs are Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x, dtype="float32")
                y = paddle.to_tensor(np_y, dtype="float64")
                self.assertRaises(TypeError, paddle.divide, x=x, y=y)

                # rule 4: x is Tensor, y is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = x / y
                z_expected = np.array([1., 1.5, 2.])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 5: y is Tensor, x is scalar
                np_x = np.array([2, 1, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = y / x
                z_expected = np.array([1., 2., 0.5])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 6: y is Tensor, x is Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                y = paddle.to_tensor(np_y)
                z = x / y
                z_expected = np.array([2., 0.6, 2.])
                self.assertEqual((z_expected == z.numpy()).all(), True)
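Apart from the version-specific TypeError expectations above, paddle.divide always performs elementwise true division with broadcasting, and the / operator on a Tensor dispatches to the same op. A compact dygraph sketch:

import paddle

x = paddle.to_tensor([2.0, 3.0, 4.0])
y = paddle.to_tensor([1.0, 5.0, 2.0])
print(paddle.divide(x, y).numpy())   # [2.  0.6 2. ]
print((x / y).numpy())               # identical result via the operator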
Code example #24
    def transfer(self, source, reference, with_face=False):
        source_input, face, crop_face = self.preprocess(source)
        reference_input, face, crop_face = self.preprocess(reference)

        consis_mask = np.float32(
            calculate_consis_mask(source_input[1], reference_input[1]))
        consis_mask = paddle.to_tensor(np.expand_dims(consis_mask, 0))

        if not (source_input and reference_input):
            if with_face:
                return None, None
            return

        for i in range(1, len(source_input) - 1):
            source_input[i] = paddle.to_tensor(
                np.expand_dims(source_input[i], 0))

        for i in range(1, len(reference_input) - 1):
            reference_input[i] = paddle.to_tensor(
                np.expand_dims(reference_input[i], 0))

        input_data = {
            'image_A': source_input[0],
            'image_B': reference_input[0],
            'mask_A_aug': source_input[1],
            'mask_B_aug': reference_input[1],
            'P_A': source_input[2],
            'P_B': reference_input[2],
            'consis_mask': consis_mask
        }

        state_dicts = load(self.model_path)
        for net_name, net in self.model.nets.items():
            net.set_state_dict(state_dicts[net_name])
        result, _ = self.model.test(input_data)
        min_, max_ = result.min(), result.max()
        result += -min_
        result = paddle.divide(result, max_ - min_ + 1e-5)
        img = toImage(result)

        if with_face:
            return img, crop_face

        return img
Code example #25
    def forward(self, true_binary, rule_masks, input_logits):
        """
        tbd
        """
        # paddle.max returns a single Tensor (not a (values, indices) pair as
        # torch.max does), so the torch-style [0] indexing is dropped here
        b = paddle.max(input_logits, axis=2, keepdim=True)
        raw_logits = input_logits - b
        exp_pred = paddle.exp(raw_logits) * rule_masks + 1e-30

        norm = paddle.sum(exp_pred, 2, keepdim=True)
        prob = paddle.divide(exp_pred, norm)

        ll = paddle.abs(paddle.sum(true_binary * prob, 2))

        mask = 1 - rule_masks[:, :, -1]

        logll = mask * paddle.log(ll)

        loss = -paddle.sum(logll) / true_binary.shape[1]

        return loss
Code example #26
File: model.py  Project: 965784749-rgb/PaddleRec
    def net(self, input, is_infer=False):
        """ network"""
        if is_infer:
            self.batch_size = envs.get_global_env(
                "dataset.inferdata.batch_size")
        else:
            self.batch_size = envs.get_global_env(
                "dataset.sample_1.batch_size")
        tagspace_model = TagspaceLayer(self.vocab_text_size,
                                       self.vocab_tag_size, self.emb_dim,
                                       self.hid_dim, self.win_size,
                                       self.margin, self.neg_size,
                                       self.text_len)
        cos_pos, cos_neg = tagspace_model(input)
        # calculate hinge loss
        loss_part1 = paddle.subtract(
            paddle.full(shape=[self.batch_size, 1],
                        fill_value=self.margin,
                        dtype='float32'), cos_pos)
        loss_part2 = paddle.add(loss_part1, cos_neg)
        loss_part3 = paddle.maximum(
            paddle.full(shape=[self.batch_size, 1],
                        fill_value=0.0,
                        dtype='float32'), loss_part2)
        avg_cost = paddle.mean(loss_part3)

        less = paddle.cast(paddle.less_than(cos_neg, cos_pos), dtype='float32')
        label_ones = paddle.full(dtype='float32',
                                 shape=[self.batch_size, 1],
                                 fill_value=1.0)
        correct = paddle.sum(less)
        total = paddle.sum(label_ones)
        acc = paddle.divide(correct, total)
        self._cost = avg_cost

        if is_infer:
            self._infer_results["acc"] = acc
            self._infer_results["loss"] = self._cost
        else:
            self._metrics["acc"] = acc
            self._metrics["loss"] = self._cost
Code example #27
File: static_model.py  Project: JohnGao1007/paddle
    def net(self, input, is_infer=False):
        if is_infer:
            self.batch_size = self.config.get("runner.infer_batch_size")
        else:
            self.batch_size = self.config.get("runner.train_batch_size")
        tagspace_model = TagspaceLayer(self.vocab_text_size,
                                       self.vocab_tag_size, self.emb_dim,
                                       self.hid_dim, self.win_size,
                                       self.margin, self.neg_size,
                                       self.text_len)
        cos_pos, cos_neg = tagspace_model(input)
        # calculate hinge loss
        loss_part1 = paddle.subtract(
            paddle.full(shape=[self.batch_size, 1],
                        fill_value=self.margin,
                        dtype='float32'), cos_pos)
        loss_part2 = paddle.add(loss_part1, cos_neg)
        loss_part3 = paddle.maximum(
            paddle.full(shape=[self.batch_size, 1],
                        fill_value=0.0,
                        dtype='float32'), loss_part2)
        avg_cost = paddle.mean(loss_part3)

        less = paddle.cast(paddle.less_than(cos_neg, cos_pos), dtype='float32')
        label_ones = paddle.full(dtype='float32',
                                 shape=[self.batch_size, 1],
                                 fill_value=1.0)
        correct = paddle.sum(less)
        total = paddle.sum(label_ones)
        acc = paddle.divide(correct, total)
        self.inference_target_var = acc

        if is_infer:
            fetch_dict = {'ACC': acc}
            return fetch_dict

        self._cost = avg_cost

        fetch_dict = {'cost': avg_cost, 'ACC': acc}
        return fetch_dict
Code example #28
    def forward(self, similarities_matrix, query_img_id, gallery_img_id,
                keep_mask):
        metric_dict = dict()

        choosen_indices = paddle.argsort(similarities_matrix,
                                         axis=1,
                                         descending=True)
        gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
        gallery_labels_transpose = paddle.broadcast_to(
            gallery_labels_transpose,
            shape=[
                choosen_indices.shape[0], gallery_labels_transpose.shape[1]
            ])
        choosen_label = paddle.index_sample(gallery_labels_transpose,
                                            choosen_indices)
        equal_flag = paddle.equal(choosen_label, query_img_id)
        if keep_mask is not None:
            keep_mask = paddle.index_sample(keep_mask.astype('float32'),
                                            choosen_indices)
            equal_flag = paddle.logical_and(equal_flag,
                                            keep_mask.astype('bool'))
        equal_flag = paddle.cast(equal_flag, 'float32')

        num_rel = paddle.sum(equal_flag, axis=1)
        num_rel = paddle.greater_than(num_rel, paddle.to_tensor(0.))
        num_rel_index = paddle.nonzero(num_rel.astype("int"))
        num_rel_index = paddle.reshape(num_rel_index, [num_rel_index.shape[0]])
        equal_flag = paddle.index_select(equal_flag, num_rel_index, axis=0)

        acc_sum = paddle.cumsum(equal_flag, axis=1)
        div = paddle.arange(acc_sum.shape[1]).astype("float32") + 1
        precision = paddle.divide(acc_sum, div)

        # calculate mAP
        precision_mask = paddle.multiply(equal_flag, precision)
        ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag,
                                                             axis=1)
        metric_dict["mAP"] = paddle.mean(ap).numpy()[0]
        return metric_dict
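For each query row, equal_flag marks whether the item retrieved at each rank matches the query label, precision is the running precision at every rank, and AP averages the precision values at the ranks of the true matches. A tiny worked example for a single query:

import paddle

equal_flag = paddle.to_tensor([[1.0, 0.0, 1.0, 0.0]])   # matches at ranks 1 and 3
acc_sum = paddle.cumsum(equal_flag, axis=1)             # [1, 1, 2, 2]
div = paddle.arange(4).astype('float32') + 1            # ranks 1..4
precision = paddle.divide(acc_sum, div)                 # [1.0, 0.5, 0.667, 0.5]
ap = paddle.sum(equal_flag * precision, axis=1) / paddle.sum(equal_flag, axis=1)
print(ap.numpy())                                       # [0.8333] = (1.0 + 2/3) / 2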
Code example #29
    def __call__(self, hm, wh, im_shape, scale_factor):
        heatmap = F.sigmoid(hm)
        heat = self._simple_nms(heatmap)
        scores, inds, clses, ys, xs = self._topk(heat)
        ys = paddle.cast(ys, 'float32') * self.down_ratio
        xs = paddle.cast(xs, 'float32') * self.down_ratio
        scores = paddle.tensor.unsqueeze(scores, [1])
        clses = paddle.tensor.unsqueeze(clses, [1])

        wh_t = paddle.transpose(wh, [0, 2, 3, 1])
        wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
        wh = paddle.gather(wh, inds)

        x1 = xs - wh[:, 0:1]
        y1 = ys - wh[:, 1:2]
        x2 = xs + wh[:, 2:3]
        y2 = ys + wh[:, 3:4]

        bboxes = paddle.concat([x1, y1, x2, y2], axis=1)

        scale_y = scale_factor[:, 0:1]
        scale_x = scale_factor[:, 1:2]
        scale_expand = paddle.concat(
            [scale_x, scale_y, scale_x, scale_y], axis=1)
        boxes_shape = paddle.shape(bboxes)
        boxes_shape.stop_gradient = True
        scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
        bboxes = paddle.divide(bboxes, scale_expand)
        results = paddle.concat([clses, scores, bboxes], axis=1)
        # hack: append a row with cls=-1 and score=1. so that at least one
        # score exceeds score_thresh; otherwise gather below would fail
        fill_r = paddle.to_tensor(np.array([[-1, 1, 0, 0, 0, 0]]))
        fill_r = paddle.cast(fill_r, results.dtype)
        results = paddle.concat([results, fill_r])
        scores = results[:, 1]
        valid_ind = paddle.nonzero(scores > self.score_thresh)
        results = paddle.gather(results, valid_ind)
        return results, paddle.shape(results)[0:1]
Code example #30
    def forward(self, x, inp):
        B = self.conv_theta(x)
        sizeB = paddle.shape(B)
        B = paddle.flatten(B, 2, 3)

        sizex = paddle.shape(x)
        x_reduce = self.conv_phi(x)

        x_reduce = paddle.flatten(x_reduce, 2, 3).transpose((0, 2, 1))

        V = paddle.bmm(B, x_reduce).transpose((0, 2, 1))
        V = paddle.divide(V, (sizex[2] * sizex[3]).astype('float32'))

        class_node, new_V = self.graph(inp, V)
        D = B.transpose((0, 2, 1))
        Y = paddle.bmm(D, new_V.transpose((0, 2, 1)))
        Y = Y.transpose((0, 2, 1)).reshape((sizex[0], self.num_state, \
                                            sizex[2], -1))
        Y = self.extend_dim(Y)
        Y = self.bn(Y)
        out = Y + x

        return out, class_node