import numpy as np
import torch
import torch.nn.functional as F
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

# NOTE: build_dataset, build_dataloader, online_evaluate and output_accuracy
# are project-specific helpers assumed to be importable from the surrounding
# package; their exact module paths are not shown in these snippets.


def test(model, dataset, cfg, logger):
    if cfg.load_from:
        logger.info('load from {}'.format(cfg.load_from))
        load_checkpoint(model, cfg.load_from, strict=True, logger=logger)

    losses = []
    edges = []   # (center, neighbor) node-index pairs for predicted links
    scores = []  # class-1 softmax probability for the matching edge

    if cfg.gpus == 1:
        data_loader = build_dataloader(dataset,
                                       cfg.batch_size_per_gpu,
                                       cfg.workers_per_gpu,
                                       train=False)

        model = MMDataParallel(model, device_ids=range(cfg.gpus))
        if cfg.cuda:
            model.cuda()

        model.eval()
        for i, (data, cid, node_list) in enumerate(data_loader):
            with torch.no_grad():
                _, _, h1id, gtmat = data
                pred, loss = model(data, return_loss=True)
                losses += [loss.item()]
                pred = F.softmax(pred, dim=1)
                if i % cfg.log_config.interval == 0:
                    if dataset.ignore_label:
                        logger.info('[Test] Iter {}/{}'.format(
                            i, len(data_loader)))
                    else:
                        acc, p, r = online_evaluate(gtmat, pred)
                        logger.info(
                            '[Test] Iter {}/{}: Loss {:.4f}, '
                            'Accuracy {:.4f}, Precision {:.4f}, Recall {:.4f}'.
                            format(i, len(data_loader), loss, acc, p, r))

                node_list = node_list.numpy()
                bs = len(cid)
                h1id_num = len(h1id[0])
                # For each sample, link the center node to its 1-hop
                # neighbors and record the predicted link probability.
                for b in range(bs):
                    cidb = cid[b].int().item()
                    nlst = node_list[b]
                    center_idx = nlst[cidb]
                    for j, n in enumerate(h1id[b]):
                        edges.append([center_idx, nlst[n.item()]])
                        scores.append(pred[b * h1id_num + j, 1].item())
    else:
        raise NotImplementedError

    if not dataset.ignore_label:
        avg_loss = sum(losses) / len(losses)
        logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss))

    return np.array(edges), np.array(scores), len(dataset)
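
# Usage note (not part of the original snippet): `test` returns an (E, 2)
# array of candidate edges, their class-1 softmax scores, and the dataset
# size; downstream clustering typically keeps only the confident links.
# A minimal sketch, where `filter_edges_by_score` and `th` are illustrative
# names rather than project API:
def filter_edges_by_score(edges, scores, th=0.5):
    # Boolean mask over the E predicted links; keep high-confidence ones.
    keep = scores > th
    return edges[keep], scores[keep]

# edges, scores, size = test(model, dataset, cfg, logger)
# strong_edges, strong_scores = filter_edges_by_score(edges, scores, th=0.7)

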
def train_lgcn(model, cfg, logger):
    # prepare data loaders
    for k, v in cfg.model['kwargs'].items():
        setattr(cfg.train_data, k, v)
    dataset = build_dataset(cfg.train_data)
    data_loaders = [
        build_dataloader(dataset,
                         cfg.batch_size_per_gpu,
                         cfg.workers_per_gpu,
                         train=True,
                         shuffle=True)
    ]

    # train
    if cfg.distributed:
        raise NotImplementedError
    else:
        _single_train(model, data_loaders, cfg)
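
# Configuration sketch (not part of the original snippet): `test` and
# `train_lgcn` only read a handful of attributes off `cfg`. A minimal
# stand-in built from the fields actually used above; all values are
# illustrative assumptions.
from types import SimpleNamespace

cfg_sketch = SimpleNamespace(
    load_from=None,                    # optional checkpoint path
    gpus=1,                            # only the single-GPU path exists
    cuda=True,
    distributed=False,
    batch_size_per_gpu=16,
    workers_per_gpu=2,
    regressor=False,
    max_conn=1,
    log_config=SimpleNamespace(interval=20),
    model={'kwargs': {}},              # copied onto train_data before building
    train_data=SimpleNamespace(),
)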
Example #3
def test(model, dataset, cfg, logger):
    if cfg.load_from:
        logger.info('load from {}'.format(cfg.load_from))
        load_checkpoint(model, cfg.load_from, strict=True, logger=logger)

    losses = []
    accs = []
    pred_conns = []  # per-sample indices of the selected connection(s)

    max_lst = []     # per-sample maximum score (only filled when max_conn == 1)
    multi_max = []   # tie counts when several candidates share the maximum

    if cfg.gpus == 1:
        data_loader = build_dataloader(dataset,
                                       cfg.batch_size_per_gpu,
                                       cfg.workers_per_gpu,
                                       train=False)
        size = len(data_loader)

        model = MMDataParallel(model, device_ids=range(cfg.gpus))
        if cfg.cuda:
            model.cuda()

        model.eval()
        for i, data in enumerate(data_loader):
            with torch.no_grad():
                output, loss = model(data, return_loss=True)
                if not dataset.ignore_label:
                    labels = data[2].view(-1)
                    if not cfg.regressor:
                        acc = output_accuracy(output, labels)
                        accs += [acc.item()]
                    losses += [loss.item()]
                if not cfg.regressor:
                    # For the classifier, keep the positive-class score.
                    output = output[:, 1]
                if cfg.max_conn == 1:
                    # Keep the single highest-scoring candidate; ties are
                    # broken uniformly at random below.
                    output_max = output.max()
                    pred = (output == output_max).nonzero().view(-1)
                    pred_size = len(pred)
                    if pred_size > 1:
                        multi_max.append(pred_size)
                        pred_i = np.random.choice(np.arange(pred_size))
                    else:
                        pred_i = 0
                    pred = [int(pred[pred_i].detach().cpu().numpy())]
                    max_lst.append(output_max.detach().cpu().numpy())
                elif cfg.max_conn > 1:
                    output = output.detach().cpu().numpy()
                    # Take the max_conn highest-scoring candidates: a negative
                    # kth makes argpartition gather the k largest at the end.
                    pred = output.argpartition(-cfg.max_conn)[-cfg.max_conn:]
                pred_conns.append(pred)
                if i % cfg.log_config.interval == 0:
                    if dataset.ignore_label:
                        logger.info('[Test] Iter {}/{}'.format(i, size))
                    else:
                        logger.info('[Test] Iter {}/{}: Loss {:.4f}'.format(
                            i, size, loss))
    else:
        raise NotImplementedError

    if not dataset.ignore_label:
        avg_loss = sum(losses) / len(losses)
        logger.info('[Test] Overall Loss {:.4f}'.format(avg_loss))
        if not cfg.regressor:
            avg_acc = sum(accs) / len(accs)
            logger.info('[Test] Overall Accuracy {:.4f}'.format(avg_acc))
    # max_lst is only populated when cfg.max_conn == 1, so guard on the list
    # itself to avoid calling max()/min() on an empty sequence.
    if len(max_lst) > 0:
        logger.info('max val: mean({:.2f}), max({:.2f}), min({:.2f})'.format(
            sum(max_lst) / len(max_lst), max(max_lst), min(max_lst)))
    multi_max_size = len(multi_max)
    if multi_max_size > 0:
        logger.info('multi-max({:.2f}): mean({:.1f}), max({}), min({})'.format(
            1. * multi_max_size / size,
            sum(multi_max) / multi_max_size, max(multi_max), min(multi_max)))

    return np.array(pred_conns)
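
# Note on the top-k selection (not part of the original snippet):
# np.argpartition with a negative kth places the k largest entries in the
# last k slots, which is why the `max_conn > 1` branch above slices with
# [-cfg.max_conn:]. A quick self-contained check:
import numpy as np

vals = np.array([0.1, 0.9, 0.4, 0.7, 0.2])
k = 2
topk_idx = vals.argpartition(-k)[-k:]   # indices of the 2 largest scores
assert set(topk_idx) == {1, 3}          # 0.9 at index 1, 0.7 at index 3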