Code Example #1
import sys  # provides sys.argv[1], the target directory passed to saveModel


def equal_logical_xor(name: str, x, y, z):
    import paddle
    paddle.enable_static()

    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
        node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')
        node_z = paddle.static.data(name='z', shape=z.shape, dtype='float32')

        bool_x = paddle.equal(node_x, node_y)
        bool_y = paddle.equal(node_x, node_z)

        out = paddle.logical_and(bool_x, bool_y)
        out = paddle.cast(out, x.dtype)

        cpu = paddle.static.cpu_places(1)
        exe = paddle.static.Executor(cpu[0])
        # The startup program runs the initializers for all parameters.
        exe.run(paddle.static.default_startup_program())

        outs = exe.run(feed={'x': x, 'y': y, 'z': z}, fetch_list=[out])

        # saveModel is the serialization helper defined alongside this
        # generator script; it dumps the program with reference inputs/outputs.
        saveModel(name,
                  exe,
                  feedkeys=['x', 'y', 'z'],
                  fetchlist=[out],
                  inputs=[x, y, z],
                  outputs=[outs[0]],
                  target_dir=sys.argv[1])

    return outs[0]
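
A minimal driver sketch for the helper above (the test name and array values are illustrative assumptions; the script is expected to be invoked with a target directory as its first command-line argument):

import numpy as np

x = np.array([[1, 2], [3, 4]], dtype=np.float32)
y = np.array([[1, 0], [3, 4]], dtype=np.float32)
z = np.array([[1, 2], [0, 4]], dtype=np.float32)
# Produces 1.0 where x == y AND x == z, else 0.0.
equal_logical_xor("equal_logical_xor_test", x, y, z)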
Code Example #2
File: lookahead.py  Project: wuhuachaocoding/Paddle
    def _append_optimize_op(self, block, param_and_grad):
        one_var = paddle.ones(shape=[1], dtype='int32', name='lookahead_ones')
        zero_var = paddle.zeros(shape=[1],
                                dtype='int32',
                                name='lookahead_zeros')
        # `layers` and `unique_name` here are the paddle.fluid helper modules
        # imported at the top of lookahead.py.
        k_var = layers.create_global_var(
            name=unique_name.generate("lookahead_k"),
            shape=[1],
            value=self.k,
            dtype='int32',
            persistable=True)

        mod = paddle.remainder(self._global_step_var, k_var)

        cond_1 = paddle.equal(self._global_step_var, one_var)
        cond_1 = paddle.cast(cond_1, dtype='float32')

        cond_2 = paddle.equal(mod, zero_var)
        cond_2 = paddle.cast(cond_2, dtype='float32')

        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0])

        tmp_var = cond_1 * param_and_grad[0] + (1 - cond_1) * slow_var
        paddle.assign(tmp_var, slow_var)

        tmp_var = self.alpha * param_and_grad[0] + (1.0 -
                                                    self.alpha) * slow_var
        tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * param_and_grad[0]
        paddle.assign(tmp_var_1, param_and_grad[0])

        tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * slow_var
        paddle.assign(tmp_var_1, slow_var)
Code Example #3
import paddle


def compute_iou(pred_i, label_i, zero):
    # `zero` is a scalar paddle tensor; paddle.equal yields a one-element
    # bool tensor, which Python's `if` can evaluate in dynamic mode.
    intersect_area_i = paddle.sum(pred_i * label_i)
    if paddle.equal(intersect_area_i, zero):
        return 0

    pred_area_i = paddle.sum(pred_i)
    label_area_i = paddle.sum(label_i)
    union_area_i = pred_area_i + label_area_i - intersect_area_i
    if paddle.equal(union_area_i, zero):
        return 1
    else:
        return intersect_area_i / union_area_i
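
A minimal call sketch in dynamic mode, with binary 0/1 masks as assumed inputs:

import paddle

pred = paddle.to_tensor([1., 1., 0., 0.])
label = paddle.to_tensor([1., 0., 0., 0.])
zero = paddle.to_tensor(0.)
iou = compute_iou(pred, label, zero)  # intersection 1, union 2 -> 0.5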
Code Example #4
    def finetunning(self, x_spt, y_spt, x_qry, y_qry):
        # assert len(x_spt.shape) == 4

        query_size = x_qry.shape[0]
        correct_list = [0 for _ in range(self.update_step_test + 1)]

        new_net = deepcopy(self.net)
        y_hat = new_net(x_spt)
        loss = F.cross_entropy(y_hat, y_spt)
        grad = paddle.grad(loss, new_net.parameters())
        fast_weights = list(
            map(lambda p: p[1] - self.base_lr * p[0],
                zip(grad, new_net.parameters())))

        # Evaluate on the query set and compute accuracy.
        # This step uses the weights from before the update.
        with paddle.no_grad():
            y_hat = new_net(x_qry,
                            params=new_net.parameters(),
                            bn_training=True)
            pred_qry = F.softmax(y_hat, axis=1).argmax(axis=1)  # size = (75)
            correct = paddle.equal(pred_qry, y_qry).numpy().sum().item()
            correct_list[0] += correct

        # Evaluate on the query set with the updated weights.
        with paddle.no_grad():
            y_hat = new_net(x_qry, params=fast_weights, bn_training=True)
            pred_qry = F.softmax(y_hat, axis=1).argmax(axis=1)  # size = (75)
            correct = paddle.equal(pred_qry, y_qry).numpy().sum().item()
            correct_list[1] += correct

        for k in range(1, self.update_step_test):
            y_hat = new_net(x_spt, params=fast_weights, bn_training=True)
            loss = F.cross_entropy(y_hat, y_spt)
            grad = paddle.grad(loss, fast_weights)
            fast_weights = list(
                map(lambda p: p[1] - self.base_lr * p[0],
                    zip(grad, fast_weights)))

            y_hat = new_net(x_qry, fast_weights, bn_training=True)

            with paddle.no_grad():
                pred_qry = F.softmax(y_hat, axis=1).argmax(axis=1)
                correct = paddle.equal(pred_qry, y_qry).numpy().sum().item()
                correct_list[k + 1] += correct

        del new_net
        accs = np.array(correct_list) / query_size
        return accs
Code Example #5
import paddle


def _hsv_to_rgb(img):
    """Convert an image Tensor from HSV to RGB."""
    h, s, v = img.unbind(axis=-3)
    f = h * 6.0
    i = paddle.floor(f)
    f = f - i
    i = i.astype(paddle.int32) % 6

    p = paddle.clip(v * (1.0 - s), 0.0, 1.0)
    q = paddle.clip(v * (1.0 - s * f), 0.0, 1.0)
    t = paddle.clip(v * (1.0 - s * (1.0 - f)), 0.0, 1.0)

    mask = paddle.equal(
        i.unsqueeze(axis=-3),
        paddle.arange(
            6, dtype=i.dtype).reshape((-1, 1, 1))).astype(img.dtype)
    matrix = paddle.stack(
        [
            paddle.stack(
                [v, q, p, p, t, v], axis=-3), paddle.stack(
                    [t, v, v, q, p, p], axis=-3), paddle.stack(
                        [p, p, t, v, v, q], axis=-3)
        ],
        axis=-4)
    return paddle.einsum("...ijk, ...xijk -> ...xjk", mask, matrix)
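
A quick shape check (a sketch; the helper assumes a channel-first float image with values in [0, 1]):

import paddle

hsv = paddle.rand([3, 8, 8])  # H, S, V stacked on axis -3
rgb = _hsv_to_rgb(hsv)
print(rgb.shape)              # [3, 8, 8]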
Code Example #6
    def infer_net(self, inputs):
        def embedding_layer(input, table_name, initializer_instance=None):
            emb = paddle.static.nn.embedding(
                input=input,
                size=[self.sparse_feature_number, self.sparse_feature_dim],
                param_attr=table_name)
            return emb

        all_label = np.arange(self.sparse_feature_number).reshape(
            self.sparse_feature_number).astype('int32')
        self.all_label = paddle.cast(x=paddle.nn.functional.assign(all_label),
                                     dtype='int64')
        emb_all_label = embedding_layer(self.all_label, "emb")
        emb_a = embedding_layer(inputs[0], "emb")
        emb_b = embedding_layer(inputs[1], "emb")
        emb_c = embedding_layer(inputs[2], "emb")

        target = paddle.add(x=paddle.fluid.layers.nn.elementwise_sub(
            emb_b, emb_a),
                            y=emb_c)

        emb_all_label_l2 = paddle.fluid.layers.l2_normalize(x=emb_all_label,
                                                            axis=1)
        dist = paddle.fluid.layers.matmul(x=target,
                                          y=emb_all_label_l2,
                                          transpose_y=True)
        values, pred_idx = paddle.topk(x=dist, k=1)
        label = paddle.fluid.layers.expand(paddle.unsqueeze(inputs[3],
                                                            axis=[1]),
                                           expand_times=[1, 1])
        label_ones = paddle.fluid.layers.fill_constant_batch_size_like(
            label, shape=[-1, 1], value=1.0, dtype='float32')
        right_cnt = paddle.sum(
            x=paddle.cast(paddle.equal(x=pred_idx, y=label), dtype='float32'))
        total_cnt = paddle.sum(x=label_ones)

        global_right_cnt = paddle.fluid.layers.create_global_var(
            name="global_right_cnt",
            persistable=True,
            dtype='float32',
            shape=[1],
            value=0)
        global_total_cnt = paddle.fluid.layers.create_global_var(
            name="global_total_cnt",
            persistable=True,
            dtype='float32',
            shape=[1],
            value=0)
        global_right_cnt.stop_gradient = True
        global_total_cnt.stop_gradient = True

        tmp1 = paddle.add(x=right_cnt, y=global_right_cnt)
        paddle.nn.functional.assign(tmp1, global_right_cnt)
        tmp2 = paddle.add(x=total_cnt, y=global_total_cnt)
        paddle.nn.functional.assign(tmp2, global_total_cnt)

        acc = paddle.divide(x=global_right_cnt,
                            y=global_total_cnt,
                            name="total_acc")
        self._infer_results['acc'] = acc
Code Example #7
    def forward(self):
        fpn_rois = self.input('FpnRois', 0)
        areas = self.bbox_area(fpn_rois)
        scale = paddle.sqrt(areas)
        num_level = self.max_level - self.min_level + 1
        target_level = paddle.log(scale / self.refer_scale + 1e-06) / np.log(2)
        target_level = paddle.floor(self.refer_level + target_level)
        target_level = paddle.clip(target_level,
                                   min=self.min_level,
                                   max=self.max_level)

        rois = list()
        rois_idx_order = list()

        for level in range(self.min_level, self.max_level + 1):
            level_tensor = paddle.full_like(target_level, fill_value=level)
            res = paddle.equal(target_level, level_tensor)
            res = paddle.squeeze(res, axis=1)
            res = paddle.cast(res, dtype='int32')
            index = paddle.nonzero(res)
            roi = paddle.gather(fpn_rois, index, axis=0)
            rois.append(roi)
            rois_idx_order.append(index)
        rois_idx_order = paddle.concat(rois_idx_order, axis=0)
        size = paddle.shape(rois_idx_order)[0]
        _, rois_idx_restore = paddle.topk(rois_idx_order,
                                          axis=0,
                                          sorted=True,
                                          largest=False,
                                          k=size)
        #rois_idx_restore = paddle.cast(rois_idx_restore, dtype='int32')
        return {'MultiFpnRois': rois, 'RestoreIndex': [rois_idx_restore]}
Code Example #8
    def infer_forward(self, dy_model, metrics_list, batch_data, config):
        dy_model.train()
        x_spt, y_spt, x_qry, y_qry = self.create_feeds(batch_data, config)
        x_spt = x_spt[0]
        y_spt = y_spt[0]
        x_qry = x_qry[0]
        y_qry = y_qry[0]
        update_step = config.get("hyper_parameters.update_step_test", 5)
        query_size = x_qry.shape[0]
        correct_list = []

        task_net = copy.deepcopy(dy_model)
        base_lr = config.get("hyper_parameters.base_optimizer.learning_rate",
                             0.1)
        task_optimizer = paddle.optimizer.SGD(learning_rate=base_lr,
                                              parameters=task_net.parameters())
        for j in range(update_step):
            task_optimizer.clear_grad()
            y_hat = task_net.forward(x_spt)
            loss_spt = F.cross_entropy(y_hat, y_spt)
            loss_spt.backward()
            task_optimizer.step()

        y_hat = task_net.forward(x_qry)
        pred_qry = F.softmax(y_hat, axis=1).argmax(axis=1)
        correct = paddle.equal(pred_qry, y_qry).numpy().sum().item()
        correct_list.append(correct)
        acc = sum(correct_list) / query_size
        acc = paddle.to_tensor(acc)
        print_dict = {"acc": acc}

        return metrics_list, print_dict
Code Example #9
File: mol_model.py  Project: xueeinstein/PaddleHelix
    def label2edge(self, label, mask_diag=True):
        # get size
        num_samples = label.shape[1]
        # reshape
        label_i = paddle.transpose(
            paddle.expand(label,
                          [num_samples, label.shape[0], label.shape[1]]),
            [1, 2, 0])
        label_j = label_i.transpose((0, 2, 1))
        # compute edge
        edge = paddle.cast(paddle.equal(label_i, label_j), 'float32')

        # expand
        edge = edge.unsqueeze(1)
        if self.edge_type == 'dist':
            edge = 1 - edge
        if self.edge_dim == 2:
            edge = paddle.concat([edge, 1 - edge], 1)

        if mask_diag:
            diag_mask = 1.0 - paddle.expand(
                paddle.eye(edge.shape[2]),
                [edge.shape[0], self.edge_dim, edge.shape[2], edge.shape[2]])
            edge = edge * diag_mask
        if self.edge_activation == 'softmax':
            edge = edge / edge.sum(-1).unsqueeze(-1)
        return edge
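
The paddle.equal call is what builds the adjacency: edge[b][i][j] is 1 exactly when samples i and j in batch b share a label. The core step in isolation (a sketch):

import paddle

label = paddle.to_tensor([[0, 1, 0]])  # [batch, num_samples]
num_samples = label.shape[1]
label_i = paddle.transpose(
    paddle.expand(label, [num_samples, label.shape[0], num_samples]),
    [1, 2, 0])
label_j = label_i.transpose((0, 2, 1))
edge = paddle.cast(paddle.equal(label_i, label_j), 'float32')
# edge[0] is [[1, 0, 1], [0, 1, 0], [1, 0, 1]]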
Code Example #10
    def forward(self, similarities_matrix, query_img_id, gallery_img_id,
                keep_mask):
        metric_dict = dict()

        # get cmc: rank gallery samples by descending similarity
        choosen_indices = paddle.argsort(similarities_matrix,
                                         axis=1,
                                         descending=True)
        gallery_labels_transpose = paddle.transpose(gallery_img_id, [1, 0])
        gallery_labels_transpose = paddle.broadcast_to(
            gallery_labels_transpose,
            shape=[
                choosen_indices.shape[0], gallery_labels_transpose.shape[1]
            ])
        choosen_label = paddle.index_sample(gallery_labels_transpose,
                                            choosen_indices)
        equal_flag = paddle.equal(choosen_label, query_img_id)
        if keep_mask is not None:
            keep_mask = paddle.index_sample(keep_mask.astype('float32'),
                                            choosen_indices)
            equal_flag = paddle.logical_and(equal_flag,
                                            keep_mask.astype('bool'))
        equal_flag = paddle.cast(equal_flag, 'float32')

        Ns = paddle.arange(gallery_img_id.shape[0]) + 1
        equal_flag_cumsum = paddle.cumsum(equal_flag, axis=1)
        Precision_at_k = (paddle.mean(equal_flag_cumsum, axis=0) / Ns).numpy()

        for k in self.topk:
            metric_dict["precision@{}".format(k)] = Precision_at_k[k - 1]

        return metric_dict
Code Example #11
File: generate_equal.py  Project: yury-intel/openvino
import sys  # provides sys.argv[1], the target directory passed to saveModel

import numpy as np


def equal(name: str, x, y):
    import paddle
    paddle.enable_static()

    node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32')
    node_y = paddle.static.data(name='y', shape=y.shape, dtype='float32')

    out = paddle.equal(node_x, node_y)
    out = paddle.cast(out, np.float32)

    cpu = paddle.static.cpu_places(1)
    exe = paddle.static.Executor(cpu[0])
    # The startup program runs the initializers for all parameters.
    exe.run(paddle.static.default_startup_program())

    outs = exe.run(feed={'x': x, 'y': y}, fetch_list=[out])

    saveModel(name,
              exe,
              feedkeys=['x', 'y'],
              fetchlist=[out],
              inputs=[x, y],
              outputs=[outs[0]],
              target_dir=sys.argv[1])

    return outs[0]
Code Example #12
import paddle


def _rgb_to_hsv(img):
    """Convert an image Tensor from RGB to HSV. This implementation is based on Pillow (
            https://github.com/python-pillow/Pillow/blob/main/src/libImaging/Convert.c)
    """
    maxc = img.max(axis=-3)
    minc = img.min(axis=-3)

    is_equal = paddle.equal(maxc, minc)
    one_divisor = paddle.ones_like(maxc)
    c_delta = maxc - minc
    # s is 0 when maxc == minc, set the divisor to 1 to avoid zero divide.
    s = c_delta / paddle.where(is_equal, one_divisor, maxc)

    r, g, b = img.unbind(axis=-3)
    c_delta_divisor = paddle.where(is_equal, one_divisor, c_delta)
    # when maxc == minc, there is r == g == b, set the divisor to 1 to avoid zero divide.
    rc = (maxc - r) / c_delta_divisor
    gc = (maxc - g) / c_delta_divisor
    bc = (maxc - b) / c_delta_divisor

    hr = (maxc == r).astype(maxc.dtype) * (bc - gc)
    hg = ((maxc == g) & (maxc != r)).astype(maxc.dtype) * (rc - bc + 2.0)
    hb = ((maxc != r) & (maxc != g)).astype(maxc.dtype) * (gc - rc + 4.0)
    h = (hr + hg + hb) / 6.0 + 1.0
    h = h - h.trunc()
    return paddle.stack([h, s, maxc], axis=-3)
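
Combined with the _hsv_to_rgb helper from Code Example #5, a round-trip sanity check might look like this (a sketch; both helpers expect float images with channels on axis -3):

import paddle

img = paddle.rand([3, 16, 16])
restored = _hsv_to_rgb(_rgb_to_hsv(img))
print(float((img - restored).abs().max()))  # should be near 0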
Code Example #13
 def _points_nms(self, heat, kernel=2):
     hmax = fluid.layers.pool2d(input=heat,
                                pool_size=kernel,
                                pool_type='max',
                                pool_padding=1)
     keep = fluid.layers.cast(paddle.equal(hmax[:, :, :-1, :-1], heat),
                              'float32')
     return paddle.multiply(heat, keep)
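
The trick: a point survives only if it equals the maximum of its local window, so non-peak responses are zeroed. The same idea with the paddle 2.x dynamic-graph API (a sketch, assuming an NCHW heatmap):

import paddle
import paddle.nn.functional as F

heat = paddle.rand([1, 1, 8, 8])
hmax = F.max_pool2d(heat, kernel_size=2, stride=1, padding=1)
keep = paddle.equal(hmax[:, :, :-1, :-1], heat).astype('float32')
out = paddle.multiply(heat, keep)  # only local maxima remain non-zero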
Code Example #14
 def get_dist_subgraphs(self, graph, dist_inds):
     subg_edge_list = []
     if self.num_dist == 1:
         subg_eids = paddle.greater_equal(
             dist_inds, paddle.to_tensor(0.)).nonzero().squeeze()
         subg_edge_list.append(subg_eids)
     elif self.num_dist == 2:
         subg_eids = paddle.equal(dist_inds,
                                  paddle.to_tensor(0.)).nonzero().squeeze()
         subg_edge_list.append(subg_eids)
         subg_eids = paddle.greater_equal(
             dist_inds, paddle.to_tensor(1.)).nonzero().squeeze()
         subg_edge_list.append(subg_eids)
     else:
         for k in range(self.num_dist):
             subg_edge_list.append(
                 paddle.equal(dist_inds, paddle.to_tensor(
                     float(k))).nonzero().squeeze())
Code Example #15
    def test_api(self):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
            limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
            out = paddle.equal(x=label, y=limit)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            res, = exe.run(fetch_list=[out])
        self.assertEqual((res == np.array([True, False])).all(), True)

        with fluid.program_guard(fluid.Program(), fluid.Program()):
            label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
            limit = fluid.layers.assign(np.array([3, 3], dtype="int32"))
            out = paddle.equal(x=label, y=limit)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            res, = exe.run(fetch_list=[out])
        self.assertEqual((res == np.array([True, True])).all(), True)
Code Example #16
    def forward(self, x1, x2, target):
        similarity = paddle.fluid.layers.reduce_sum(x1 * x2, dim=-1) / (
            paddle.norm(x1, axis=-1) * paddle.norm(x2, axis=-1) + self.epsilon)
        one_list = paddle.full_like(target, fill_value=1)
        out = paddle.fluid.layers.reduce_mean(
            paddle.where(
                paddle.equal(target, one_list), 1. - similarity,
                paddle.maximum(paddle.zeros_like(similarity),
                               similarity - self.margin)))

        return out
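
A call sketch (shapes are assumptions; target uses 1 for similar pairs and, conventionally, -1 for dissimilar ones):

import paddle

x1 = paddle.rand([4, 16])
x2 = paddle.rand([4, 16])
target = paddle.to_tensor([1, 1, -1, -1])
# loss_fn: a hypothetical instance of the module above, with margin and
# epsilon already set in its constructor.
loss = loss_fn(x1, x2, target)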
Code Example #17
 def val_epoch(self, datasets):
     self.model.eval()
     acc = list()
     for batch_id, data in enumerate(datasets()):
         images, labels = data
         pred = self.model(images)
         pred = paddle.argmax(pred, axis=-1)  # take the highest-scoring index as the predicted class
         res = paddle.equal(pred, labels)
         res = paddle.cast(res, dtype='float32')
         acc.extend(res.numpy())  # accumulate per-sample results
     acc = np.array(acc).mean()
     return acc
Code Example #18
File: net.py  Project: duyiqi17/PaddleRec
        def dmr_fcn_attention(item_eb,
                              item_his_eb,
                              context_his_eb,
                              mask,
                              mode='SUM'):
            mask = paddle.equal(mask, paddle.ones_like(mask))
            item_eb_tile = paddle.tile(item_eb,
                                       [1, paddle.shape(mask)[1]])  # B, T*E
            item_eb_tile = paddle.reshape(
                item_eb_tile,
                [-1, paddle.shape(mask)[1], item_eb.shape[-1]])  # B, T, E
            if context_his_eb is None:
                query = item_eb_tile
            else:
                query = paddle.concat([item_eb_tile, context_his_eb], axis=-1)
            query = self.query_layer2(query)
            query = self.query_prelu2(query)
            dmr_all = paddle.concat(
                [
                    query, item_his_eb, query - item_his_eb,
                    query * item_his_eb
                ],
                axis=-1)
            att_layer_1 = self.att_layer1_layer2(dmr_all)
            att_layer_1 = F.sigmoid(att_layer_1)
            att_layer_2 = self.att_layer2_layer2(att_layer_1)
            att_layer_2 = F.sigmoid(att_layer_2)
            att_layer_3 = self.att_layer3_layer2(att_layer_2)  # B, T, 1
            att_layer_3 = paddle.reshape(
                att_layer_3, [-1, 1, paddle.shape(item_his_eb)[1]])  # B,1,T
            scores = att_layer_3
            scores = scores.reshape([-1, 1, self.history_length])

            # Mask
            key_masks = paddle.unsqueeze(mask, 1)  # B,1,T
            paddings = paddle.ones_like(scores) * (-2**32 + 1)
            paddings_no_softmax = paddle.zeros_like(scores)
            scores = paddle.where(key_masks, scores, paddings)  # [B, 1, T]
            scores_no_softmax = paddle.where(key_masks, scores,
                                             paddings_no_softmax)

            scores = F.softmax(scores)

            if mode == 'SUM':
                output = paddle.matmul(scores, item_his_eb)  # [B, 1, H]
                output = paddle.sum(output, axis=1)  # B,E
            else:
                scores = paddle.reshape(scores,
                                        [-1, paddle.shape(item_his_eb)[1]])
                output = item_his_eb * paddle.unsqueeze(scores, -1)
                output = paddle.reshape(output, paddle.shape(item_his_eb))

            return output, scores, scores_no_softmax
Code Example #19
File: net.py  Project: duyiqi17/PaddleRec
    def forward(self, inputs, labels, weights, bias):
        """forward
        """
        # weights.stop_gradient = False
        embedding_dim = paddle.shape(weights)[-1]
        true_log_probs, samp_log_probs, neg_samples = self.sample(labels)
        n_sample = neg_samples.shape[0]

        b1 = paddle.shape(labels)[0]
        b2 = paddle.shape(labels)[1]

        all_ids = paddle.concat([labels.reshape((-1, )), neg_samples])
        all_w = paddle.gather(weights, all_ids)

        true_w = all_w[:-n_sample].reshape((-1, b2, embedding_dim))
        sample_w = all_w[-n_sample:].reshape((n_sample, embedding_dim))

        all_b = paddle.gather(bias, all_ids)
        true_b = all_b[:-n_sample].reshape((-1, 1))

        sample_b = all_b[-n_sample:]

        # [B, D] * [B, 1,D]
        true_logist = paddle.matmul(
            true_w, inputs.unsqueeze(1), transpose_y=True).squeeze(1) + true_b

        sample_logist = paddle.matmul(
            inputs.unsqueeze(1), sample_w, transpose_y=True) + sample_b

        if self.subtract_log_q:
            true_logist = true_logist - true_log_probs.unsqueeze(1)
            sample_logist = sample_logist - samp_log_probs

        if self.remove_accidental_hits:
            hit = (paddle.equal(labels[:, :], neg_samples)).unsqueeze(1)
            padding = paddle.ones_like(sample_logist) * -1e30
            sample_logist = paddle.where(hit, padding, sample_logist)

        sample_logist = sample_logist.squeeze(1)
        out_logist = paddle.concat([true_logist, sample_logist], axis=1)
        out_label = paddle.concat([
            paddle.ones_like(true_logist) / self.num_true,
            paddle.zeros_like(sample_logist)
        ],
                                  axis=1)

        sampled_loss = F.softmax_with_cross_entropy(logits=out_logist,
                                                    label=out_label,
                                                    soft_label=True)
        return sampled_loss, out_logist, out_label
Code Example #20
    def forward(self, input, target):
        """
        Args:
            inputs: feature matrix with shape (batch_size, feat_dim)
            target: ground truth labels with shape (num_classes)
        """
        inputs = input["features"]

        bs = inputs.shape[0]
        # Compute pairwise distance, replace by the official when merged
        dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs])
        dist = dist + dist.t()
        dist = paddle.addmm(input=dist,
                            x=inputs,
                            y=inputs.t(),
                            alpha=-2.0,
                            beta=1.0)
        dist = paddle.clip(dist, min=1e-12).sqrt()

        mask = paddle.equal(target.expand([bs, bs]),
                            target.expand([bs, bs]).t())
        mask_numpy_idx = mask.numpy()
        dist_ap, dist_an = [], []
        for i in range(bs):
            # dist_ap_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i]].max(),dtype='float64').unsqueeze(0)
            # dist_ap_i.stop_gradient = False
            # dist_ap.append(dist_ap_i)
            dist_ap.append(
                max([
                    dist[i][j]
                    if mask_numpy_idx[i][j] == True else float("-inf")
                    for j in range(bs)
                ]).unsqueeze(0))
            # dist_an_i = paddle.to_tensor(dist[i].numpy()[mask_numpy_idx[i] == False].min(), dtype='float64').unsqueeze(0)
            # dist_an_i.stop_gradient = False
            # dist_an.append(dist_an_i)
            dist_an.append(
                min([
                    dist[i][k]
                    if mask_numpy_idx[i][k] == False else float("inf")
                    for k in range(bs)
                ]).unsqueeze(0))

        dist_ap = paddle.concat(dist_ap, axis=0)
        dist_an = paddle.concat(dist_an, axis=0)

        # Compute ranking hinge loss
        y = paddle.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return {"TripletLoss": loss}
Code Example #21
    def train_forward(self, dy_model, metrics_list, batch_data, config):
        np.random.seed(12345)
        x_spt, y_spt, x_qry, y_qry = self.create_feeds(batch_data, config)
        update_step = config.get("hyper_parameters.update_step", 5)
        task_num = x_spt.shape[0]
        query_size = x_qry.shape[
            1]  # 75 = 15 * 5, x_qry.shape = [32,75,1,28,28]
        loss_list = []
        correct_list = []
        task_grad = [[] for _ in range(task_num)]

        for i in range(task_num):
            # outer loop
            task_net = copy.deepcopy(dy_model)
            base_lr = config.get(
                "hyper_parameters.base_optimizer.learning_rate", 0.1)
            task_optimizer = paddle.optimizer.SGD(
                learning_rate=base_lr, parameters=task_net.parameters())
            for j in range(update_step):
                # inner loop
                task_optimizer.clear_grad()  # reset gradients
                y_hat = task_net.forward(x_spt[i])  # (setsz, ways) [5,5]
                loss_spt = F.cross_entropy(y_hat, y_spt[i])
                loss_spt.backward()
                task_optimizer.step()

            y_hat = task_net.forward(x_qry[i])
            loss_qry = F.cross_entropy(y_hat, y_qry[i])
            loss_qry.backward()
            for k in task_net.parameters():
                task_grad[i].append(k.grad)
            loss_list.append(loss_qry)
            pred_qry = F.softmax(y_hat, axis=1).argmax(axis=1)
            correct = paddle.equal(pred_qry, y_qry[i]).numpy().sum().item()
            correct_list.append(correct)

        loss_average = paddle.add_n(loss_list) / task_num
        acc = sum(correct_list) / (query_size * task_num)

        for num, k in enumerate(dy_model.parameters()):
            tmp_list = [task_grad[i][num] for i in range(task_num)]
            if tmp_list[0] is not None:
                k._set_grad_ivar(paddle.add_n(tmp_list) / task_num)

        acc = paddle.to_tensor(acc)
        print_dict = {'loss': loss_average, "acc": acc}
        _ = paddle.ones(shape=[5, 5], dtype="float32")  # dummy tensor returned in the loss position
        return _, metrics_list, print_dict
Code Example #22
 def forward(self, bbox_out, bbox_target, label):
     # keep only pos (1) and part (-1) samples
     ones = paddle.ones_like(label)
     zeros = paddle.zeros_like(label)
     valid_label = paddle.where(paddle.equal(paddle.abs(label), ones), ones,
                                zeros)
     valid_label = paddle.squeeze(valid_label)
     # total number of valid samples to keep
     keep_num = int(paddle.sum(valid_label).numpy()[0] * self.keep_ratio)
     loss = self.square_loss(input=bbox_out, label=bbox_target)
     loss = paddle.sum(loss, axis=1)
     loss = loss * valid_label
     # compute the loss over the valid samples only
     loss, _ = paddle.topk(loss, k=keep_num, axis=0)
     return paddle.mean(loss)
Code Example #23
File: network.py  Project: qiufengyu/Projects-2021
def train(model):
    model = Network()  # note: the passed-in model is discarded; a fresh one is built here
    model.train()
    train_loader = load_data()
    test_loader = load_data('test')
    optimizer = paddle.optimizer.Adam(learning_rate=5e-3,
                                      parameters=model.parameters())
    for epoch in range(EPOCH_NUM):
        for batch_id, data in enumerate(train_loader()):
            # prepare the data
            features, labels = data
            features = paddle.to_tensor(features)
            labels = paddle.to_tensor(labels, stop_gradient=True)
            predicts = model(features)
            ce_loss = F.cross_entropy(predicts, labels)
            avg_loss = paddle.mean(ce_loss)
            # print the loss every 20 batches
            if batch_id % 20 == 0:
                loss_val = avg_loss.numpy()[0]
                print('epoch: {}, batch: {}, loss: {}'.format(
                    epoch, batch_id, loss_val))
            # backpropagate and update the parameters
            avg_loss.backward()
            optimizer.step()
            optimizer.clear_grad()
        # evaluate the model on the test set
        test_samples = 0
        correct = 0
        for batch_id, data in enumerate(test_loader()):
            features, labels = data
            features = paddle.to_tensor(features)
            labels = paddle.to_tensor(labels, stop_gradient=True)
            predicts = model(features)
            test_samples += len(labels)
            arg_max_predicts = paddle.argmax(predicts, axis=-1)
            correct_tensor = paddle.sum(
                paddle.cast(paddle.equal(labels, arg_max_predicts),
                            dtype=np.int64))
            correct_array = correct_tensor.numpy()
            correct += correct_array[0]
        acc = correct / test_samples
        print('epoch: {}, test cases: {}, correct: {}, accuracy: {}'.format(
            epoch, test_samples, correct, acc))

    # save the model parameters and the optimizer state
    paddle.save(model.state_dict(), BASE_DIR_STRING + '/model.pdparams')
    paddle.save(optimizer.state_dict(), BASE_DIR_STRING + '/model.pdopt')
    print(optimizer.state_dict().keys())
Code Example #24
 def forward(self, landmark_out, landmark_target, label):
     # keep only landmark samples (label == -2)
     ones = paddle.ones_like(label)
     zeros = paddle.zeros_like(label)
     valid_label = paddle.where(
         paddle.equal(label, paddle.full_like(label, fill_value=-2)), ones,
         zeros)
     valid_label = paddle.squeeze(valid_label)
     # total number of valid samples to keep
     keep_num = int(paddle.sum(valid_label).numpy()[0] * self.keep_ratio)
     loss = self.square_loss(input=landmark_out, label=landmark_target)
     loss = paddle.sum(loss, axis=1)
     loss = loss * valid_label
     # compute the loss over the valid samples only
     loss, _ = paddle.topk(loss, k=keep_num, axis=0)
     return paddle.mean(loss)
Code Example #25
File: utils.py  Project: sandyhouse/Paddle
def check_initial_inverse_hessian_estimate(H0):
    r"""Check whether the specified initial_inverse_hessian_estimate is symmetric and positive definite.
        Raise errors when precondition not met.

    Note: 
        In static graph can not raise error directly, so use py_func make raise_func as a op,
        and use paddle.static.nn.cond to decide if put the op in net.
        cholesky is the fast way to check positive definition, but in static graph can not catch 
        exception to raise value error, so use eigvals rather than cholesky in static graph.
    """
    is_symmetric = paddle.all(paddle.equal(H0, H0.t()))

    def raise_func():
        raise ValueError(
            "The initial_inverse_hessian_estimate should be symmetric and positive definite, but the specified is not."
        )

    if paddle.in_dynamic_mode():
        if not is_symmetric:
            raise_func()
        try:
            paddle.linalg.cholesky(H0)
        except RuntimeError:
            raise_func()
    else:

        def create_tmp_var(program, name, dtype, shape):
            return program.current_block().create_var(
                name=name, dtype=dtype, shape=shape)

        out_var = create_tmp_var(
            paddle.static.default_main_program(),
            name='output',
            dtype='float32',
            shape=[-1])

        def false_fn():
            paddle.static.nn.py_func(
                func=raise_func, x=is_symmetric, out=out_var)

        paddle.static.nn.cond(is_symmetric, None, false_fn)
        # eigvals only support cpu
        paddle.set_device("cpu")
        eigvals = paddle.linalg.eigvals(H0)
        is_positive = paddle.all(eigvals.real() > 0.) and paddle.all(
            eigvals.imag() == 0.)
        paddle.static.nn.cond(is_positive, None, false_fn)
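
In dynamic mode the check reduces to a symmetry test plus a Cholesky factorization; a minimal usage sketch:

import paddle

H0 = paddle.to_tensor([[2., 0.], [0., 3.]])  # symmetric positive definite
check_initial_inverse_hessian_estimate(H0)   # passes silently

H_bad = paddle.to_tensor([[1., 2.], [3., 4.]])  # not symmetric
# check_initial_inverse_hessian_estimate(H_bad)  # would raise ValueError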
Code Example #26
import paddle


def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k."""
    maxk = max(topk)
    batch_size = target.shape[0]  # 256

    _, pred = paddle.topk(output, maxk, 1, True, True)  # 256, 5
    pred = paddle.t(pred)  # 5,256
    correct = paddle.equal(pred,
                           paddle.expand_as(target.reshape([1, -1]),
                                            pred)).astype('float32')  # 5, 256

    res = []
    for k in topk:
        correct_k = paddle.flatten(correct[:k], start_axis=0,
                                   stop_axis=-1).sum(0)
        res.append(correct_k * (100.0 / batch_size))
    return res
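
An example call (a sketch with assumed batch size and class count):

import paddle

output = paddle.rand([256, 10])              # logits
target = paddle.randint(0, 10, shape=[256])
top1, top5 = accuracy(output, target, topk=(1, 5))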
Code Example #27
    def accuracy(self, pred, target, topk=1, thresh=None):
        """Calculate accuracy according to the prediction and target.

        Args:
            pred (paddle.Tensor): The model prediction, shape (N, num_class)
            target (paddle.Tensor): The target of each prediction, shape (N, )
            topk (int | tuple[int], optional): If the predictions in ``topk``
                matches the target, the predictions will be regarded as
                correct ones. Defaults to 1.
            thresh (float, optional): If not None, predictions with scores under
                this threshold are considered incorrect. Default to None.

        Returns:
            float | tuple[float]: If the input ``topk`` is a single integer,
                the function will return a single float as accuracy. If
                ``topk`` is a tuple containing multiple integers, the
                function will return a tuple containing accuracies of
                each ``topk`` number.
        """
        assert isinstance(topk, (int, tuple))
        if isinstance(topk, int):
            topk = (topk, )
            return_single = True
        else:
            return_single = False

        maxk = max(topk)
        if pred.shape[0] == 0:
            # paddle.Tensor has no new_tensor(); build zero tensors directly
            accu = [paddle.to_tensor(0.) for _ in range(len(topk))]
            return accu[0] if return_single else accu
        pred_value, pred_label = paddle.topk(pred, maxk, axis=1)
        pred_label = pred_label.transpose([1,
                                           0])  # transpose to shape (maxk, N)
        correct = paddle.equal(pred_label,
                               (target.reshape([1, -1]).expand_as(pred_label)))
        res = []
        for k in topk:
            correct_k = paddle.sum(correct[:k].reshape([-1]).astype('float32'),
                                   axis=0,
                                   keepdim=True)
            res.append(
                paddle.multiply(correct_k,
                                paddle.to_tensor(100.0 / pred.shape[0])))
        return res[0] if return_single else res
Code Example #28
    def _contrastive(self, feats_, labels_):
        """
        Args:
            feats_ (Tensor): sampled pixel features, shape = [total_classes, n_view, feat_dim],
                where total_classes = batch_size * number of classes in a single image
            labels_ (Tensor): labels, shape = [total_classes]
        """
        anchor_num, n_view = feats_.shape[0], feats_.shape[1]

        labels_ = labels_.reshape((-1, 1))
        mask = paddle.equal(labels_,
                            paddle.transpose(labels_,
                                             [1, 0])).astype('float32')

        contrast_count = n_view
        contrast_feature = paddle.concat(paddle.unbind(feats_, axis=1), axis=0)

        anchor_feature = contrast_feature
        anchor_count = contrast_count

        anchor_dot_contrast = paddle.matmul(
            anchor_feature, paddle.transpose(contrast_feature,
                                             [1, 0])) / self.temperature
        logits_max = paddle.max(anchor_dot_contrast, axis=1, keepdim=True)
        logits = anchor_dot_contrast - logits_max

        mask = paddle.tile(mask, [anchor_count, contrast_count])
        neg_mask = 1 - mask

        logits_mask = 1 - paddle.eye(mask.shape[0]).astype('float32')
        mask = mask * logits_mask

        neg_logits = paddle.exp(logits) * neg_mask
        neg_logits = neg_logits.sum(1, keepdim=True)

        exp_logits = paddle.exp(logits)

        log_prob = logits - paddle.log(exp_logits + neg_logits)

        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)

        loss = -(self.temperature / self.base_temperature) * mean_log_prob_pos
        loss = loss.mean()

        return loss
Code Example #29
 def get_discriminator_inputs(self, inputs, raw_inputs, gen_logits,
                              gen_labels, use_softmax_sample):
     """Sample from the generator to create discriminator input."""
     # get generator token result
     sampled_tokens = (self.sample_from_softmax(
         gen_logits, use_softmax_sample)).detach()
     sampled_tokids = paddle.argmax(sampled_tokens, axis=-1)
     # update token only at mask position
     # gen_labels : [B, L], L contains -100(unmasked) or token value(masked)
     # mask_positions : [B, L], L contains 0(unmasked) or 1(masked)
     umask_positions = paddle.zeros_like(gen_labels)
     mask_positions = paddle.ones_like(gen_labels)
     mask_positions = paddle.where(gen_labels == -100, umask_positions,
                                   mask_positions)
     updated_inputs = self.update_inputs(inputs, sampled_tokids,
                                         mask_positions)
     # use inputs and updated_input to get discriminator labels
     labels = mask_positions * (paddle.ones_like(inputs) - paddle.equal(
         updated_inputs, raw_inputs).astype("int32"))
     return updated_inputs, labels, sampled_tokids
Code Example #30
    def __call__(self, predicts, batch):
        assert isinstance(predicts, (list, tuple))
        features, predicts = predicts

        feats_reshape = paddle.reshape(
            features, [-1, features.shape[-1]]).astype("float64")
        label = paddle.argmax(predicts, axis=2)
        label = paddle.reshape(label, [label.shape[0] * label.shape[1]])

        batch_size = feats_reshape.shape[0]

        # calc l2 distance between feats and centers
        square_feat = paddle.sum(paddle.square(feats_reshape),
                                 axis=1,
                                 keepdim=True)
        square_feat = paddle.expand(square_feat,
                                    [batch_size, self.num_classes])

        square_center = paddle.sum(paddle.square(self.centers),
                                   axis=1,
                                   keepdim=True)
        square_center = paddle.expand(
            square_center, [self.num_classes, batch_size]).astype("float64")
        square_center = paddle.transpose(square_center, [1, 0])

        distmat = paddle.add(square_feat, square_center)
        feat_dot_center = paddle.matmul(feats_reshape,
                                        paddle.transpose(self.centers, [1, 0]))
        distmat = distmat - 2.0 * feat_dot_center

        # generate the mask
        classes = paddle.arange(self.num_classes).astype("int64")
        label = paddle.expand(paddle.unsqueeze(label, 1),
                              (batch_size, self.num_classes))
        mask = paddle.equal(
            paddle.expand(classes, [batch_size, self.num_classes]),
            label).astype("float64")
        dist = paddle.multiply(distmat, mask)

        loss = paddle.sum(paddle.clip(dist, min=1e-12, max=1e+12)) / batch_size
        return {'loss_center': loss}
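
The paddle.equal call is where the mask comes from: it matches each row's label against every class index, so only the distance to that sample's own center survives. The mask construction in isolation (a sketch):

import paddle

batch_size, num_classes = 4, 3
label = paddle.to_tensor([0, 2, 1, 2])
classes = paddle.arange(num_classes).astype("int64")
label = paddle.expand(paddle.unsqueeze(label, 1), (batch_size, num_classes))
mask = paddle.equal(
    paddle.expand(classes, [batch_size, num_classes]), label).astype("float64")
# mask[i][j] == 1.0 exactly when sample i is assigned to class j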