Example #1
    def forward(self, p, q, pad_mask=None):
        """
        Args:
            p(obj:`Tensor`): the first forward logits of training examples.
            q(obj:`Tensor`): the second forward logits of training examples.
            pad_mask(obj:`Tensor`, optional): the Tensor containing the binary mask to index with; its data type is bool.

        Returns:
            loss(obj:`Tensor`): the R-Drop loss of p and q.
        """
        p_loss = F.kl_div(F.log_softmax(p, axis=-1),
                          F.softmax(q, axis=-1),
                          reduction=self.reduction)
        q_loss = F.kl_div(F.log_softmax(q, axis=-1),
                          F.softmax(p, axis=-1),
                          reduction=self.reduction)

        # pad_mask is for seq-level tasks
        if pad_mask is not None:
            p_loss = paddle.masked_select(p_loss, pad_mask)
            q_loss = paddle.masked_select(q_loss, pad_mask)

        # Choose `sum` or `mean` here depending on your task
        p_loss = p_loss.sum()
        q_loss = q_loss.sum()
        loss = (p_loss + q_loss) / 2
        return loss
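For context, a self-contained sketch of the symmetric KL computation this forward performs. The toy logits below stand in for two dropout-perturbed passes over the same batch, and the final alpha-weighted combination is an assumed convention, not part of the snippet above:

import paddle
import paddle.nn.functional as F

# Toy logits standing in for two stochastic forward passes of one batch.
p = paddle.randn([4, 10])
q = paddle.randn([4, 10])

p_loss = F.kl_div(F.log_softmax(p, axis=-1), F.softmax(q, axis=-1), reduction='none')
q_loss = F.kl_div(F.log_softmax(q, axis=-1), F.softmax(p, axis=-1), reduction='none')
rdrop = (p_loss.sum() + q_loss.sum()) / 2
# Typical use: total_loss = cross_entropy + alpha * rdrop (alpha is task-dependent).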
Example #2
def batch_predict(
        model, 
        data_loader,
        rel_vocab,
        word_pad_index,
        word_bos_index,
        word_eos_index,
    ):
    
    model.eval()
    arcs, rels = [], []
    for inputs in data_loader():
        if args.encoding_model.startswith("ernie") or args.encoding_model == "lstm-pe":
            words = inputs[0]
            words, feats = flat_words(words)
            s_arc, s_rel, words = model(words, feats)
        else:
            words, feats = inputs
            s_arc, s_rel, words = model(words, feats)

        mask = paddle.logical_and(
            paddle.logical_and(words != word_pad_index, words != word_bos_index),
            words != word_eos_index,
        )

        lens = paddle.sum(paddle.cast(mask, "int32"), axis=-1)
        arc_preds, rel_preds = decode(s_arc, s_rel, mask)
        arcs.extend(paddle.split(paddle.masked_select(arc_preds, mask), lens.numpy().tolist()))
        rels.extend(paddle.split(paddle.masked_select(rel_preds, mask), lens.numpy().tolist()))

    arcs = [[str(s) for s in seq.numpy().tolist()] for seq in arcs]
    rels = [rel_vocab.to_tokens(seq.numpy().tolist()) for seq in rels]           

    return arcs, rels
Example #3
    def forward(self, input, target):
        """
        Args:
            input: dict containing a "features" matrix with shape (batch_size, feat_dim)
            target: ground truth labels with shape (batch_size,)
        """
        inputs = input["features"]

        if self.normalize_feature:
            inputs = 1. * inputs / (paddle.expand_as(
                paddle.norm(inputs, p=2, axis=-1, keepdim=True), inputs) +
                                    1e-12)

        bs = inputs.shape[0]

        # compute distance
        dist = paddle.pow(inputs, 2).sum(axis=1, keepdim=True).expand([bs, bs])
        dist = dist + dist.t()
        dist = paddle.addmm(input=dist,
                            x=inputs,
                            y=inputs.t(),
                            alpha=-2.0,
                            beta=1.0)
        dist = paddle.clip(dist, min=1e-12).sqrt()

        # hard negative mining
        is_pos = paddle.expand(target, (bs, bs)).equal(
            paddle.expand(target, (bs, bs)).t())
        is_neg = paddle.expand(target, (bs, bs)).not_equal(
            paddle.expand(target, (bs, bs)).t())

        # `dist_ap` means distance(anchor, positive); shape [N, 1]
        dist_ap = paddle.max(paddle.reshape(paddle.masked_select(dist, is_pos),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # `dist_an` means distance(anchor, negative); shape [N, 1]
        dist_an = paddle.min(paddle.reshape(paddle.masked_select(dist, is_neg),
                                            (bs, -1)),
                             axis=1,
                             keepdim=True)
        # shape [N]
        dist_ap = paddle.squeeze(dist_ap, axis=1)
        dist_an = paddle.squeeze(dist_an, axis=1)

        # Compute ranking hinge loss
        y = paddle.ones_like(dist_an)
        loss = self.ranking_loss(dist_an, dist_ap, y)
        return {"TripletLossV2": loss}
Example #4
 def create_loss(self, prediction, mask):
     loss_fct = paddle.nn.BCEWithLogitsLoss()
     pos_logits, neg_logits = prediction
     pos_labels, neg_labels = paddle.ones_like(
         pos_logits), paddle.zeros_like(neg_logits)
     loss = loss_fct(paddle.masked_select(pos_logits, mask),
                     paddle.masked_select(pos_labels, mask))
     loss += loss_fct(paddle.masked_select(neg_logits, mask),
                      paddle.masked_select(neg_labels, mask))
     return loss
Example #5
    def get_mc_loss(self, feat, inputs):
        # feat.shape = [bs, ch_emb, h, w]
        assert 'cls_id_map' in inputs and 'cls_tr_ids' in inputs
        index = inputs['index']
        mask = inputs['index_mask']
        cls_id_map = inputs['cls_id_map']  # [bs, h, w]
        cls_tr_ids = inputs['cls_tr_ids']  # [bs, num_classes, h, w]

        feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
        feat_n, feat_h, feat_w, feat_c = feat.shape
        feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])

        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(feat_n):
            batch_ind = paddle.full(
                shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        feat = paddle.gather_nd(feat, index=index)

        mask = paddle.unsqueeze(mask, axis=2)
        mask = paddle.expand_as(mask, feat)
        mask.stop_gradient = True
        feat = paddle.masked_select(feat, mask > 0)
        feat = paddle.reshape(feat, shape=[-1, feat_c])

        reid_losses = 0
        for cls_id, id_num in self.num_identities_dict.items():
            # target
            cur_cls_tr_ids = paddle.reshape(
                cls_tr_ids[:, cls_id, :, :], shape=[feat_n, -1])  # [bs, h*w]
            cls_id_target = paddle.gather_nd(cur_cls_tr_ids, index=index)
            mask = inputs['index_mask']
            cls_id_target = paddle.masked_select(cls_id_target, mask > 0)
            cls_id_target.stop_gradient = True

            # feat
            cls_id_feat = self.emb_scale_dict[str(cls_id)] * F.normalize(feat)
            cls_id_pred = self.classifiers[str(cls_id)](cls_id_feat)

            loss = self.reid_loss(cls_id_pred, cls_id_target)
            valid = (cls_id_target != self.reid_loss.ignore_index)
            valid.stop_gradient = True
            count = paddle.sum(paddle.cast(valid, dtype=np.int32))
            count.stop_gradient = True
            if count > 0:
                loss = loss / count
            reid_losses += loss

        return reid_losses
Example #6
File: model.py Project: WenjinW/PGL
 def forward(self, node_repr, bond_length_index, bond_length, mask):
     node_i, node_j = bond_length_index
     node_i_repr = paddle.index_select(node_repr, node_i)
     node_j_repr = paddle.index_select(node_repr, node_j)
     node_ij_repr = paddle.concat([node_i_repr, node_j_repr], 1)
     bond_length_pred = self.bond_length_pred_linear(node_ij_repr)
     bond_length_pred = paddle.masked_select(bond_length_pred, mask)
     bond_length_pred = paddle.reshape(bond_length_pred, (-1, ))
     bond_length = paddle.masked_select(bond_length, mask)
     bond_length = paddle.reshape(bond_length, (-1, ))
     loss = self.loss(bond_length_pred, bond_length)
     loss = paddle.mean(loss)
     return loss
Example #7
def train(model, data_loader, optimizer, lr_scheduler, epoch, LOG):

    stages = 4
    losses = [AverageMeter() for _ in range(stages)]
    length_loader = len(data_loader)

    model.train()

    for batch_id, data in enumerate(data_loader()):
        left_img, right_img, gt = data

        mask = paddle.to_tensor(gt.numpy() > 0)
        gt_mask = paddle.masked_select(gt, mask)

        outputs = model(left_img, right_img)
        outputs = [paddle.squeeze(output) for output in outputs]

        tem_stage_loss = []
        for index in range(stages):
            temp_loss = args.loss_weights[index] * F.smooth_l1_loss(
                paddle.masked_select(outputs[index], mask),
                gt_mask,
                reduction='mean')
            tem_stage_loss.append(temp_loss)
            losses[index].update(
                float(temp_loss.numpy() / args.loss_weights[index]))

        sum_loss = paddle.add_n(tem_stage_loss)
        sum_loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        if batch_id % 5 == 0:
            info_str = [
                'Stage {} = {:.2f}({:.2f})'.format(x, losses[x].val,
                                                   losses[x].avg)
                for x in range(stages)
            ]
            info_str = '\t'.join(info_str)
            info_str = 'Train Epoch{} [{}/{}]  lr:{:.5f}\t{}'.format(
                epoch, batch_id, length_loader, optimizer.get_lr(), info_str)
            LOG.info(info_str)

    lr_scheduler.step()

    info_str = '\t'.join(
        ['Stage {} = {:.2f}'.format(x, losses[x].avg) for x in range(stages)])
    LOG.info('Average train loss: ' + info_str)
Example #8
    def ohem_single(self, score, gt_text, training_mask, ohem_ratio=3):
        pos_num = int(paddle.sum((gt_text > 0.5).astype('float32'))) - int(
            paddle.sum(
                paddle.logical_and((gt_text > 0.5), (training_mask <= 0.5))
                .astype('float32')))

        if pos_num == 0:
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(
                [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                    'float32')
            return selected_mask

        neg_num = int(paddle.sum((gt_text <= 0.5).astype('float32')))
        neg_num = int(min(pos_num * ohem_ratio, neg_num))

        if neg_num == 0:
            selected_mask = training_mask
            selected_mask = selected_mask.reshape(
                [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                    'float32')
            return selected_mask

        neg_score = paddle.masked_select(score, gt_text <= 0.5)
        neg_score_sorted = paddle.sort(-neg_score)
        threshold = -neg_score_sorted[neg_num - 1]

        selected_mask = paddle.logical_and(
            paddle.logical_or((score >= threshold), (gt_text > 0.5)),
            (training_mask > 0.5))
        selected_mask = selected_mask.reshape(
            [1, selected_mask.shape[0], selected_mask.shape[1]]).astype(
                'float32')
        return selected_mask
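The hard-negative thresholding above, isolated on toy values (assumed here) to show the masked_select / sort interplay: negating before paddle.sort yields a descending order, so the neg_num-th entry gives the score cutoff.

import paddle

score = paddle.to_tensor([0.9, 0.2, 0.8, 0.1])
gt_text = paddle.to_tensor([1.0, 0.0, 0.0, 0.0])
neg_num = 2

neg_score = paddle.masked_select(score, gt_text <= 0.5)  # [0.2, 0.8, 0.1]
neg_score_sorted = paddle.sort(-neg_score)               # [-0.8, -0.2, -0.1]
threshold = -neg_score_sorted[neg_num - 1]               # 0.2: keep the 2 hardest negatives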
Example #9
    def _shard_edges_by_dst(self, edges, edge_feat):
        """Shard Edges by dst

        Args:

            edges: list of (u, v) tuples, 2D numpy.ndarry or 2D paddle.Tensor 

            edge_feat (optional): a dict of numpy array as edge features (should
                                have consistent order with edges)

        Returns:
     
            Return a tuple (shard_edges, shard_edge_feat) as the shard results.

        """
        shard_flag = edges[:, 1]
        mask = (shard_flag % dist.get_world_size()) == dist.get_rank()
        if isinstance(mask, paddle.Tensor):
            eid = paddle.masked_select(paddle.arange(edges.shape[0]), mask)
            shard_edges = paddle.gather(edges, eid)
            shard_edge_feat = {}
            for key, value in edge_feat.items():
                shard_edge_feat[key] = paddle.gather(value, eid)
        else:
            eid = np.arange(edges.shape[0])[mask]
            shard_edges = edges[eid]
            shard_edge_feat = {}
            for key, value in edge_feat.items():
                shard_edge_feat[key] = value[eid]
        return shard_edges, shard_edge_feat
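A single-process illustration of the numpy branch of this sharding; the world size and rank are hard-coded here instead of coming from paddle.distributed:

import numpy as np

edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])
world_size, rank = 2, 0                    # assumed in place of dist.get_world_size()/get_rank()
shard_flag = edges[:, 1]
mask = (shard_flag % world_size) == rank   # keep edges whose dst hashes to this rank
eid = np.arange(edges.shape[0])[mask]
shard_edges = edges[eid]                   # [[1, 2], [3, 0]]: dsts 2 and 0 land on rank 0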
Example #10
    def __call__(self, s_arc, s_rel, arcs, rels, mask):

        arcs = paddle.masked_select(arcs, mask)
        rels = paddle.masked_select(rels, mask)

        select = paddle.nonzero(mask)
        s_arc = paddle.gather_nd(s_arc, select)
        s_rel = paddle.gather_nd(s_rel, select)

        s_rel = index_sample(s_rel, paddle.unsqueeze(arcs, axis=1))

        arc_cost = F.cross_entropy(s_arc, arcs)
        rel_cost = F.cross_entropy(s_rel, rels)

        avg_cost = paddle.mean(arc_cost + rel_cost)
        return avg_cost
Example #11
def _selected_pixel(ref_labels_flat, ref_emb_flat):
    index_list = paddle.arange(len(ref_labels_flat))
    index_ = paddle.masked_select(index_list, ref_labels_flat != -1)

    index_ = long_(index_)
    ref_labels_flat = paddle.index_select(ref_labels_flat, index_, 0)
    ref_emb_flat = paddle.index_select(ref_emb_flat, index_, 0)

    return ref_labels_flat, ref_emb_flat
Example #12
File: model.py Project: WenjinW/PGL
    def forward(self, node_repr, bond_angle_index, bond_angle, mask):
        node_i, node_j, node_k = bond_angle_index
        node_i_repr = paddle.index_select(node_repr, node_i)
        node_j_repr = paddle.index_select(node_repr, node_j)
        node_k_repr = paddle.index_select(node_repr, node_k)

        node_ijk_repr = paddle.concat([node_i_repr, node_j_repr, node_k_repr],
                                      axis=1)
        bond_angle_pred = self.bond_angle_pred_linear(node_ijk_repr)
        bond_angle_pred = paddle.masked_select(bond_angle_pred, mask)
        bond_angle_pred = paddle.reshape(bond_angle_pred, (-1, ))
        bond_angle = paddle.masked_select(bond_angle, mask)
        bond_angle = paddle.reshape(bond_angle, (-1, ))
        loss = self.loss(bond_angle_pred, bond_angle)
        loss = paddle.mean(loss)
        return loss
Example #13
 def test_imperative_mode(self):
     paddle.disable_static()
     shape = (88, 6, 8)
     np_x = np.random.random(shape).astype('float32')
     np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))
     x = paddle.to_tensor(np_x)
     mask = paddle.to_tensor(np_mask)
     out = paddle.masked_select(x, mask)
     np_out = np_masked_select(np_x, np_mask)
     self.assertEqual(np.allclose(out.numpy(), np_out), True)
     paddle.enable_static()
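The reference helper np_masked_select is not shown in this snippet; a plausible NumPy equivalent (an assumption, matching paddle.masked_select's flattened 1-D output contract) is:

import numpy as np

def np_masked_select(np_x, np_mask):
    # NumPy boolean indexing returns a flattened 1-D copy, which is
    # exactly the shape contract of paddle.masked_select.
    return np_x[np_mask].flatten()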
Example #14
File: main.py Project: Yelrose/PGL
def evaluate(model, loader, criterion, evaluator):
    model.eval()
    total_loss = []
    y_true = []
    y_pred = []
    is_valid = []

    for idx, batch_data in enumerate(loader):
        g, mh_graphs, labels, unmask = batch_data
        g = g.tensor()
        multihop_graphs = []
        for item in mh_graphs:
            multihop_graphs.append(item.tensor())
        g.multi_hop_graphs = multihop_graphs
        labels = paddle.to_tensor(labels)
        unmask = paddle.to_tensor(unmask)

        pred = model(g)
        eval_loss = criterion(paddle.masked_select(pred, unmask),
                              paddle.masked_select(labels, unmask))
        total_loss.append(eval_loss.numpy())

        y_pred.append(pred.numpy())
        y_true.append(labels.numpy())
        is_valid.append(unmask.numpy())

    y_pred = np.concatenate(y_pred)
    y_true = np.concatenate(y_true)
    is_valid = np.concatenate(is_valid)
    is_valid = is_valid.astype("bool")
    y_true = y_true.astype("float32")
    y_true[~is_valid] = np.nan
    input_dict = {'y_true': y_true, 'y_pred': y_pred}
    result = evaluator.eval(input_dict)

    total_loss = np.mean(total_loss)
    model.train()

    return {"loss": total_loss, config.metrics: result[config.metrics]}
Example #15
    def get_loss(self, feat, inputs):
        index = inputs['index']
        mask = inputs['index_mask']
        target = inputs['reid']
        target = paddle.masked_select(target, mask > 0)
        target = paddle.unsqueeze(target, 1)

        feat = paddle.transpose(feat, perm=[0, 2, 3, 1])
        feat_n, feat_h, feat_w, feat_c = feat.shape
        feat = paddle.reshape(feat, shape=[feat_n, -1, feat_c])
        index = paddle.unsqueeze(index, 2)
        batch_inds = list()
        for i in range(feat_n):
            batch_ind = paddle.full(
                shape=[1, index.shape[1], 1], fill_value=i, dtype='int64')
            batch_inds.append(batch_ind)
        batch_inds = paddle.concat(batch_inds, axis=0)
        index = paddle.concat(x=[batch_inds, index], axis=2)
        feat = paddle.gather_nd(feat, index=index)

        mask = paddle.unsqueeze(mask, axis=2)
        mask = paddle.expand_as(mask, feat)
        mask.stop_gradient = True
        feat = paddle.masked_select(feat, mask > 0)
        feat = paddle.reshape(feat, shape=[-1, feat_c])
        feat = F.normalize(feat)
        feat = self.emb_scale * feat
        logit = self.classifier(feat)
        target.stop_gradient = True
        loss = self.reid_loss(logit, target)
        valid = (target != self.reid_loss.ignore_index)
        valid.stop_gradient = True
        count = paddle.sum(paddle.cast(valid, dtype=np.int32))
        count.stop_gradient = True
        if count > 0:
            loss = loss / count

        return loss
Example #16
 def convert_key_to_inttensor(key):
     if isinstance(key, np.ndarray):
         key = paddorch.from_numpy(key).long()
         # print("converted numpy", type(args))
     if isinstance(key, paddle.Tensor):
         if key.dtype == paddle.fluid.core.VarDesc.VarType.BOOL:
             key = paddle.masked_select(paddle.arange(len(key)), key)
         elif key.dtype in (paddle.fluid.core.VarDesc.VarType.INT32,
                            paddle.fluid.core.VarDesc.VarType.INT64):
             return key
         else:
             return key.astype("int32")
     if isinstance(key, int):
         return paddorch.LongTensor(np.array([key]))
     if isinstance(key, list):
         key = paddorch.from_numpy(key).long()
     return key
Example #17
    def forward(self, bond_types_batch, type_count_batch, bond_feat):
        """
        Input example:
            bond_types_batch: [0,0,2,0,1,2] + [0,0,2,0,1,2] + [2]
            type_count_batch: [[3, 3, 0], [1, 1, 0], [2, 2, 1]] # [num_type, batch_size]
        """
        bond_feat = self.fc_1(
            paddle.reshape(bond_feat, [-1, self.num_angle * self.bond_dim]))
        inter_mat_list = []
        for type_i in range(self.num_type):
            type_i_index = paddle.masked_select(paddle.arange(len(bond_feat)),
                                                bond_types_batch == type_i)
            if paddle.sum(type_count_batch[type_i]) == 0:
                inter_mat_list.append(
                    paddle.to_tensor(np.array([0.] *
                                              len(type_count_batch[type_i])),
                                     dtype='float32'))
                continue
            bond_feat_type_i = paddle.gather(bond_feat, type_i_index)
            graph_bond_index = op.get_index_from_counts(
                type_count_batch[type_i])
            graph_bond_id = generate_segment_id(graph_bond_index)
            graph_feat_type_i = math.segment_pool(bond_feat_type_i,
                                                  graph_bond_id,
                                                  pool_type='sum')
            mat_flat_type_i = self.fc_2(graph_feat_type_i).squeeze(1)

            my_pad = nn.Pad1D(
                padding=[0, len(type_count_batch[type_i]) - len(mat_flat_type_i)],
                value=-1e9)
            mat_flat_type_i = my_pad(mat_flat_type_i)
            inter_mat_list.append(mat_flat_type_i)

        inter_mat_batch = paddle.stack(inter_mat_list,
                                       axis=1)  # [batch_size, num_type]
        inter_mat_mask = paddle.ones_like(inter_mat_batch) * -1e9
        inter_mat_batch = paddle.where(
            type_count_batch.transpose([1, 0]) > 0, inter_mat_batch,
            inter_mat_mask)
        inter_mat_batch = self.softmax(inter_mat_batch)
        return inter_mat_batch
Example #18
    def test_static_mode(self):
        shape = [8, 9, 6]
        x = paddle.data(shape=shape, dtype='float32', name='x')
        mask = paddle.data(shape=shape, dtype='bool', name='mask')
        np_x = np.random.random(shape).astype('float32')
        np_mask = np.array(np.random.randint(2, size=shape, dtype=bool))

        out = paddle.masked_select(x, mask)
        np_out = np_masked_select(np_x, np_mask)

        exe = paddle.static.Executor(place=paddle.CPUPlace())

        res = exe.run(paddle.static.default_main_program(),
                      feed={
                          "x": np_x,
                          "mask": np_mask
                      },
                      fetch_list=[out])
        self.assertEqual(np.allclose(res, np_out), True)
Example #19
File: conv.py Project: WenjinW/PGL
    def forward(self, graph, feature, norm=None):
        """
         
        Args:
 
            graph: `pgl.Graph` instance.

            feature: A tensor with shape (num_nodes, input_size)

            norm: (default None). If :code:`norm` is not None, then the feature will be normalized by given norm. If :code:`norm` is None, then we use `lapacian degree norm`.
     
        Return:

            A tensor with shape (num_nodes, output_size)

        """
        if self.self_loop:
            index = paddle.arange(start=0, end=graph.num_nodes, dtype="int64")
            self_loop_edges = paddle.transpose(paddle.stack((index, index)),
                                               [1, 0])

            mask = graph.edges[:, 0] != graph.edges[:, 1]
            mask_index = paddle.masked_select(
                paddle.arange(end=graph.num_edges), mask)
            edges = paddle.gather(graph.edges, mask_index)  # remove self loop

            edges = paddle.concat((self_loop_edges, edges), axis=0)
            graph = pgl.Graph(num_nodes=graph.num_nodes, edges=edges)

        if norm is None:
            norm = GF.degree_norm(graph)
        h0 = feature

        for _ in range(self.k_hop):
            feature = feature * norm
            feature = graph.send_recv(feature)
            feature = feature * norm
            feature = self.alpha * h0 + (1 - self.alpha) * feature

        return feature
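The self-loop handling at the top of this forward is a common masked_select idiom; here it is in isolation on a toy edge list (values assumed):

import paddle

edges = paddle.to_tensor([[0, 0], [0, 1], [1, 2], [2, 2]], dtype="int64")
mask = edges[:, 0] != edges[:, 1]                        # False for self loops
mask_index = paddle.masked_select(paddle.arange(edges.shape[0]), mask)
edges = paddle.gather(edges, mask_index)                 # [[0, 1], [1, 2]]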
Example #20
    def process_by_class(self, bboxes, embedding, bbox_inds, topk_clses):
        pred_dets, pred_embs = [], []
        for cls_id in range(self.num_classes):
            inds_masks = topk_clses == cls_id
            inds_masks = paddle.cast(inds_masks, 'float32')

            pos_num = inds_masks.sum().numpy()
            if pos_num == 0:
                continue

            cls_inds_mask = inds_masks > 0

            bbox_mask = paddle.nonzero(cls_inds_mask)
            cls_bboxes = paddle.gather_nd(bboxes, bbox_mask)
            pred_dets.append(cls_bboxes)

            cls_inds = paddle.masked_select(bbox_inds, cls_inds_mask)
            cls_inds = cls_inds.unsqueeze(-1)
            cls_embedding = paddle.gather_nd(embedding, cls_inds)
            pred_embs.append(cls_embedding)

        return paddle.concat(pred_dets), paddle.concat(pred_embs)
Example #21
 def forward(self, inputs, mask):
     """
     forward
     """
     x = paddle.masked_select(inputs, mask)
     return x
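A quick imperative check of this thin wrapper (toy values assumed); note that paddle.masked_select always returns a flattened 1-D tensor, whatever the input rank:

import paddle

inputs = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
mask = paddle.to_tensor([[True, False], [False, True]])
out = paddle.masked_select(inputs, mask)  # Tensor [1., 4.], shape [2]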
Example #22
File: main.py Project: Yelrose/PGL
def main(config):
    if dist.get_world_size() > 1:
        dist.init_parallel_env()

    if dist.get_rank() == 0:
        timestamp = datetime.now().strftime("%Hh%Mm%Ss")
        log_path = os.path.join(config.log_dir,
                                "tensorboard_log_%s" % timestamp)
        writer = SummaryWriter(log_path)

    log.info("loading data")
    raw_dataset = GraphPropPredDataset(name=config.dataset_name)
    config.num_class = raw_dataset.num_tasks
    config.eval_metric = raw_dataset.eval_metric
    config.task_type = raw_dataset.task_type

    mol_dataset = MolDataset(config,
                             raw_dataset,
                             transform=make_multihop_edges)
    splitted_index = raw_dataset.get_idx_split()
    train_ds = Subset(mol_dataset, splitted_index['train'], mode='train')
    valid_ds = Subset(mol_dataset, splitted_index['valid'], mode="valid")
    test_ds = Subset(mol_dataset, splitted_index['test'], mode="test")

    log.info("Train Examples: %s" % len(train_ds))
    log.info("Val Examples: %s" % len(valid_ds))
    log.info("Test Examples: %s" % len(test_ds))

    fn = CollateFn(config)

    train_loader = Dataloader(train_ds,
                              batch_size=config.batch_size,
                              shuffle=True,
                              num_workers=config.num_workers,
                              collate_fn=fn)

    valid_loader = Dataloader(valid_ds,
                              batch_size=config.batch_size,
                              num_workers=config.num_workers,
                              collate_fn=fn)

    test_loader = Dataloader(test_ds,
                             batch_size=config.batch_size,
                             num_workers=config.num_workers,
                             collate_fn=fn)

    model = ClassifierNetwork(config.hidden_size, config.out_dim,
                              config.num_layers, config.dropout_prob,
                              config.virt_node, config.K, config.conv_type,
                              config.appnp_hop, config.alpha)
    model = paddle.DataParallel(model)

    optim = Adam(learning_rate=config.lr, parameters=model.parameters())
    criterion = nn.loss.BCEWithLogitsLoss()

    evaluator = Evaluator(config.dataset_name)

    best_valid = 0

    global_step = 0
    for epoch in range(1, config.epochs + 1):
        model.train()
        for idx, batch_data in enumerate(train_loader):
            g, mh_graphs, labels, unmask = batch_data
            g = g.tensor()
            multihop_graphs = []
            for item in mh_graphs:
                multihop_graphs.append(item.tensor())
            g.multi_hop_graphs = multihop_graphs
            labels = paddle.to_tensor(labels)
            unmask = paddle.to_tensor(unmask)

            pred = model(g)
            pred = paddle.masked_select(pred, unmask)
            labels = paddle.masked_select(labels, unmask)
            train_loss = criterion(pred, labels)
            train_loss.backward()
            optim.step()
            optim.clear_grad()

            if global_step % 80 == 0:
                message = "train: epoch %d | step %d | " % (epoch, global_step)
                message += "loss %.6f" % (train_loss.numpy())
                log.info(message)
                if dist.get_rank() == 0:
                    writer.add_scalar("loss", train_loss.numpy(), global_step)
            global_step += 1

        valid_result = evaluate(model, valid_loader, criterion, evaluator)
        message = "valid: epoch %d | step %d | " % (epoch, global_step)
        for key, value in valid_result.items():
            message += " | %s %.6f" % (key, value)
            if dist.get_rank() == 0:
                writer.add_scalar("valid_%s" % key, value, global_step)
        log.info(message)

        test_result = evaluate(model, test_loader, criterion, evaluator)
        message = "test: epoch %d | step %d | " % (epoch, global_step)
        for key, value in test_result.items():
            message += " | %s %.6f" % (key, value)
            if dist.get_rank() == 0:
                writer.add_scalar("test_%s" % key, value, global_step)
        log.info(message)

        if best_valid < valid_result[config.metrics]:
            best_valid = valid_result[config.metrics]
            best_valid_result = valid_result
            best_test_result = test_result

        message = "best result: epoch %d | " % (epoch)
        message += "valid %s: %.6f | " % (config.metrics,
                                          best_valid_result[config.metrics])
        message += "test %s: %.6f | " % (config.metrics,
                                         best_test_result[config.metrics])
        log.info(message)

    message = "final eval best result:%.6f" % best_valid_result[config.metrics]
    log.info(message)
    message = "final test best result:%.6f" % best_test_result[config.metrics]
    log.info(message)
Example #23
 def forward(self, x, y):
     """
     forward
     """
     x = paddle.masked_select(x, y)
     return x
Example #24
    def get_loss(self, head_outs, targets):
        pred_cls, pred_bboxes, pred_obj,\
        anchor_points, stride_tensor, num_anchors_list = head_outs
        gt_labels = targets['gt_class']
        gt_bboxes = targets['gt_bbox']
        pred_scores = (pred_cls * pred_obj).sqrt()
        # label assignment
        center_and_strides = paddle.concat(
            [anchor_points, stride_tensor, stride_tensor], axis=-1)
        pos_num_list, label_list, bbox_target_list = [], [], []
        for pred_score, pred_bbox, gt_box, gt_label in zip(
                pred_scores.detach(),
                pred_bboxes.detach() * stride_tensor, gt_bboxes, gt_labels):
            pos_num, label, _, bbox_target = self.assigner(
                pred_score, center_and_strides, pred_bbox, gt_box, gt_label)
            pos_num_list.append(pos_num)
            label_list.append(label)
            bbox_target_list.append(bbox_target)
        labels = paddle.to_tensor(np.stack(label_list, axis=0))
        bbox_targets = paddle.to_tensor(np.stack(bbox_target_list, axis=0))
        bbox_targets /= stride_tensor  # rescale bbox

        # 1. obj score loss
        mask_positive = (labels != self.num_classes)
        loss_obj = F.binary_cross_entropy(pred_obj,
                                          mask_positive.astype(
                                              pred_obj.dtype).unsqueeze(-1),
                                          reduction='sum')

        num_pos = sum(pos_num_list)

        if num_pos > 0:
            num_pos = paddle.to_tensor(num_pos, dtype=self._dtype).clip(min=1)
            loss_obj /= num_pos

            # 2. iou loss
            bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
            pred_bboxes_pos = paddle.masked_select(pred_bboxes,
                                                   bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = paddle.masked_select(
                bbox_targets, bbox_mask).reshape([-1, 4])
            bbox_iou = bbox_overlaps(pred_bboxes_pos, assigned_bboxes_pos)
            bbox_iou = paddle.diag(bbox_iou)

            loss_iou = self.iou_loss(pred_bboxes_pos.split(4, axis=-1),
                                     assigned_bboxes_pos.split(4, axis=-1))
            loss_iou = loss_iou.sum() / num_pos

            # 3. cls loss
            cls_mask = mask_positive.unsqueeze(-1).tile(
                [1, 1, self.num_classes])
            pred_cls_pos = paddle.masked_select(pred_cls, cls_mask).reshape(
                [-1, self.num_classes])
            assigned_cls_pos = paddle.masked_select(labels, mask_positive)
            assigned_cls_pos = F.one_hot(assigned_cls_pos,
                                         self.num_classes + 1)[..., :-1]
            assigned_cls_pos *= bbox_iou.unsqueeze(-1)
            loss_cls = F.binary_cross_entropy(pred_cls_pos,
                                              assigned_cls_pos,
                                              reduction='sum')
            loss_cls /= num_pos

            # 4. l1 loss
            if targets['epoch_id'] >= self.l1_epoch:
                loss_l1 = F.l1_loss(pred_bboxes_pos,
                                    assigned_bboxes_pos,
                                    reduction='sum')
                loss_l1 /= num_pos
            else:
                loss_l1 = paddle.zeros([1])
                loss_l1.stop_gradient = False
        else:
            loss_cls = paddle.zeros([1])
            loss_iou = paddle.zeros([1])
            loss_l1 = paddle.zeros([1])
            loss_cls.stop_gradient = False
            loss_iou.stop_gradient = False
            loss_l1.stop_gradient = False

        loss = self.loss_weight['obj'] * loss_obj + \
               self.loss_weight['cls'] * loss_cls + \
               self.loss_weight['iou'] * loss_iou

        if targets['epoch_id'] >= self.l1_epoch:
            loss += (self.loss_weight['l1'] * loss_l1)

        yolox_losses = {
            'loss': loss,
            'loss_cls': loss_cls,
            'loss_obj': loss_obj,
            'loss_iou': loss_iou,
            'loss_l1': loss_l1,
        }
        return yolox_losses
Example #25
    def get_loss(self, head_outs, gt_meta):
        pred_scores, pred_bboxes, anchors, \
        num_anchors_list, stride_tensor = head_outs
        gt_labels = gt_meta['gt_class']
        gt_bboxes = gt_meta['gt_bbox']
        pad_gt_mask = gt_meta['pad_gt_mask']
        # label assignment
        if gt_meta['epoch_id'] < self.static_assigner_epoch:
            assigned_labels, assigned_bboxes, assigned_scores = self.static_assigner(
                anchors,
                num_anchors_list,
                gt_labels,
                gt_bboxes,
                pad_gt_mask,
                bg_index=self.num_classes)
            alpha_l = 0.25
        else:
            assigned_labels, assigned_bboxes, assigned_scores = self.assigner(
                pred_scores.detach(),
                pred_bboxes.detach() * stride_tensor,
                bbox_center(anchors),
                num_anchors_list,
                gt_labels,
                gt_bboxes,
                pad_gt_mask,
                bg_index=self.num_classes)
            alpha_l = -1

        # rescale bbox
        assigned_bboxes /= stride_tensor
        # classification loss
        loss_cls = self._focal_loss(pred_scores,
                                    assigned_scores,
                                    alpha=alpha_l)
        # select positive samples mask
        mask_positive = (assigned_labels != self.num_classes)
        num_pos = mask_positive.astype(paddle.float32).sum()
        # bbox regression loss
        if num_pos > 0:
            bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
            pred_bboxes_pos = paddle.masked_select(pred_bboxes,
                                                   bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = paddle.masked_select(
                assigned_bboxes, bbox_mask).reshape([-1, 4])
            bbox_weight = paddle.masked_select(assigned_scores.sum(-1),
                                               mask_positive).unsqueeze(-1)
            # iou loss
            loss_iou = self.giou_loss(pred_bboxes_pos,
                                      assigned_bboxes_pos) * bbox_weight
            loss_iou = loss_iou.sum() / bbox_weight.sum()
            # l1 loss
            loss_l1 = F.l1_loss(pred_bboxes_pos, assigned_bboxes_pos)
        else:
            loss_iou = paddle.zeros([1])
            loss_l1 = paddle.zeros([1])

        loss_cls /= assigned_scores.sum().clip(min=1)
        loss = self.loss_weight['class'] * loss_cls + self.loss_weight[
            'iou'] * loss_iou

        return {
            'loss': loss,
            'loss_class': loss_cls,
            'loss_iou': loss_iou,
            'loss_l1': loss_l1
        }
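Both detection heads above use the same positive-sample gather; reduced to a toy case (one image, three anchors, two classes, background index 2, all assumed):

import paddle

labels = paddle.to_tensor([[0, 2, 1]])        # [batch, anchors]; 2 = background
pred_bboxes = paddle.rand([1, 3, 4])
mask_positive = labels != 2                   # [1, 3] bool
bbox_mask = mask_positive.unsqueeze(-1).tile([1, 1, 4])
pred_bboxes_pos = paddle.masked_select(pred_bboxes, bbox_mask).reshape([-1, 4])  # [2, 4]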
Example #26
 def test_mask_dtype():
     paddle.masked_select(x, mask_float)
Example #27
 def test_mask_type():
     paddle.masked_select(x, np_mask)
Example #28
 def test_x_type():
     paddle.masked_select(np_x, mask)
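These three closures come from an error-path unit test; the surrounding fixtures are not shown. A plausible harness (names, shapes, and the TypeError expectation are assumptions) would be:

import numpy as np
import paddle

shape = [8, 9, 6]
np_x = np.random.random(shape).astype('float32')
np_mask = np.random.randint(2, size=shape).astype(bool)
x = paddle.to_tensor(np_x)
mask = paddle.to_tensor(np_mask)
mask_float = paddle.to_tensor(np_mask.astype('float32'))

# Each closure is then expected to raise TypeError, e.g. inside a
# unittest.TestCase: self.assertRaises(TypeError, test_mask_dtype)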