Example #1
def main(_config):

    print(_config)

    savedir_root = _config['training']['savedir']
    device = torch.device(_config['misc']['device'])
    rootdir = os.path.join(_config['dataset']['datasetdir'],
                           _config['dataset']['dataset'])

    # create the test filelist according to the selected area
    print("Create filelist...", end="")
    filelist_test = []
    for area_idx in range(1, 7):
        folder = os.path.join(rootdir, f"Area_{area_idx}")
        datasets = [
            os.path.join(f"Area_{area_idx}", dataset)
            for dataset in os.listdir(folder)
        ]
        if area_idx == _config['dataset']['area']:
            filelist_test = filelist_test + datasets
    filelist_test.sort()
    print(f"done, {len(filelist_test)} test files")

    N_CLASSES = 13

    # create the network
    print("Creating the network...", end="", flush=True)

    def network_function():
        return get_network(_config["network"]["model"],
                           in_channels=3,
                           out_channels=N_CLASSES,
                           backend_conv=_config["network"]["backend_conv"],
                           backend_search=_config["network"]["backend_search"],
                           config=_config,
                           loadSubModelWeights=False)

    net = network_function()
    net.load_state_dict(
        torch.load(os.path.join(savedir_root, "checkpoint.pth"))["state_dict"])
    net.to(device)
    net.eval()

    # create the global confusion matrix
    cm_global = np.zeros((N_CLASSES, N_CLASSES))

    for filename in filelist_test:

        # create the dataloader
        ds = Dataset(filename,
                     rootdir,
                     block_size=_config['dataset']['pillar_size'],
                     npoints=_config['dataset']['npoints'],
                     rgb=_config['training']['rgb'],
                     step=_config['test']['step'],
                     network_function=network_function)

        test_loader = torch.utils.data.DataLoader(
            ds,
            batch_size=_config['test']['batchsize'],
            shuffle=False,
            num_workers=_config['misc']['threads'])

        # create a score accumulator
        scores = np.zeros((ds.xyzrgb.shape[0], N_CLASSES))

        # iterate over the dataloader
        t = tqdm(test_loader, ncols=100, desc=filename)
        with torch.no_grad():
            for data in t:

                pts = data['pts'].to(device)
                features = data['features'].to(device)
                pts_ids = data['pts_ids']
                net_ids = data["net_indices"]
                net_pts = data["net_support"]
                for i in range(len(net_ids)):
                    net_ids[i] = net_ids[i].to(device)
                for i in range(len(net_pts)):
                    net_pts[i] = net_pts[i].to(device)

                outputs = net(features,
                              pts,
                              indices=net_ids,
                              support_points=net_pts)
                outputs_np = outputs.transpose(
                    1, 2).cpu().detach().numpy().reshape((-1, N_CLASSES))
                scores[pts_ids.numpy().ravel()] += outputs_np

        # get the original points
        original_points = ds.xyzrgb[:, :3]
        original_labels = ds.labels

        # mask = np.logical_and((np.abs(scores).sum(1) > 0), np.argmax(scores, axis=1) == np.argmax(scores_noc, axis=1))

        # compute the mask of points seen at prediction time
        mask = (np.abs(scores).sum(1) > 0)
        seen_scores = scores[mask]
        seen_points = original_points[mask]

        # project the scores on the original points
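        # each original point takes the score of its nearest seen point (K=1 nearest neighbor)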
        scores = nearest_correspondance(
            torch.from_numpy(seen_points).float().transpose(0, 1),
            torch.from_numpy(original_points).float().transpose(0, 1),
            torch.from_numpy(seen_scores).float().transpose(0, 1),
            K=1).transpose(0, 1).numpy()
        original_preds = np.argmax(scores, axis=1)

        # confusion matrix
        cm = confusion_matrix(original_labels,
                              original_preds,
                              labels=list(range(N_CLASSES)))
        cm_global += cm

        print("IoU", metrics.stats_iou_per_class(cm)[0])

        # saving results
        savedir_results = os.path.join(
            savedir_root, f"results_step{_config['test']['step']}", filename)

        # saving labels
        if _config['test']['savepreds']:
            os.makedirs(savedir_results, exist_ok=True)
            np.savetxt(os.path.join(savedir_results, "pred.txt"),
                       original_preds,
                       fmt='%d')

        if _config['test']['savepts']:
            os.makedirs(savedir_results, exist_ok=True)
            original_preds = np.expand_dims(original_preds, 1).astype(int)
            original_points = ds.xyzrgb
            original_points = np.concatenate([original_points, original_preds],
                                             axis=1)
            np.savetxt(os.path.join(savedir_results, "pts.txt"),
                       original_points,
                       fmt=['%.4f', '%.4f', '%.4f', '%d', '%d', '%d', '%d'])

    print(
        "WARNING: the following scores need to be checked against the "
        "evaluation script from the ConvPoint repository"
    )
    print("TODO: verify consistency with the evaluation script")
    iou = metrics.stats_iou_per_class(cm_global)
    print("Global IoU")
    print(iou[0])
    print("Global IoU per class")
    print(iou[1])

    print(f"{iou[0]} ", end="", flush=True)
    for i in range(iou[1].shape[0]):
        print(f"{iou[1][i]} ", end="")
    print("")
Example #2
def main(_run, _config):

    print(_config)

    savedir_root = _config["training"]["savedir"]
    device = torch.device(_config["misc"]["device"])

    # save the config file
    os.makedirs(savedir_root, exist_ok=True)
    save_config_file(eval(str(_config)), os.path.join(savedir_root, "config.yaml"))

    print("get the data path...", end="", flush=True)
    rootdir = _config["dataset"]["dir"]
    print("done")

    N_CLASSES = 50

    print("Creating network...", end="", flush=True)

    def network_function():
        return Network(
            1, N_CLASSES,
            get_conv(_config["network"]["backend_conv"]),
            get_search(_config["network"]["backend_search"]),
        )

    net = network_function()
    net.to(device)
    network_parameters = count_parameters(net)
    print("parameters", network_parameters)

    training_transformations = [
        lcp_transfo.UnitBallNormalize(),
        lcp_transfo.RandomSubSample(_config["dataset"]["npoints"]),
        lcp_transfo.NormalPerturbation(sigma=0.001)
    ]
    test_transformations = [
        lcp_transfo.UnitBallNormalize(),
        lcp_transfo.RandomSubSample(_config["dataset"]["npoints"]),
    ]

    print("Creating dataloader...", end="", flush=True)
    ds = Dataset(
        rootdir,
        'training',
        network_function=network_function,
        transformations_points=training_transformations
    )
    train_loader = torch.utils.data.DataLoader(
        ds,
        batch_size=_config["training"]["batchsize"],
        shuffle=True,
        num_workers=_config["misc"]["threads"],
    )
    ds_test = Dataset(
        rootdir,
        'test',
        network_function=network_function,
        transformations_points=test_transformations
    )
    test_loader = torch.utils.data.DataLoader(
        ds_test,
        batch_size=_config["training"]["batchsize"],
        shuffle=False,
        num_workers=_config["misc"]["threads"],
    )
    print("Done")


    # define weights
    print("Computing weights...", end="", flush=True)
    weights = torch.from_numpy(ds.get_weights()).float().to(device)
    print("Done")

    print("Creating optimizer...", end="", flush=True)
    optimizer = torch.optim.Adam(net.parameters(), lr=_config["training"]["lr_start"], eps=1e-3)
    epoch_start = 0
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        _config["training"]["milestones"],
        gamma=_config["training"]["gamma"],
        last_epoch=epoch_start - 1,
    )
    print("Done")


    def get_data(data):
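        # unpack a batch and move points, features, labels and the per-layer indices/support points to the device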

        pts = data["pts"].to(device)
        features = data["features"].to(device)
        seg = data["seg"].to(device)
        labels = data["label"]
        net_ids = data["net_indices"]
        net_pts = data["net_support"]
        for i in range(len(net_ids)):
            net_ids[i] = net_ids[i].to(device)
        for i in range(len(net_pts)):
            net_pts[i] = net_pts[i].to(device)

        return pts, features, seg, labels, net_ids, net_pts


    # training loop
    for epoch in range(epoch_start, _config["training"]["epoch_nbr"]):

        # train
        net.train()
        cm = np.zeros((N_CLASSES, N_CLASSES))
        t = tqdm(
            train_loader,
            ncols=120,
            desc=f"Epoch {epoch}",
            disable=_config["misc"]["disable_tqdm"],
        )
        for data in t:

            pts, features, seg, labels, net_ids, net_pts = get_data(data)

            optimizer.zero_grad()
            outputs = net(features, pts, support_points=net_pts, indices=net_ids)
            loss = F.cross_entropy(outputs, seg, weight=weights)
            loss.backward()
            optimizer.step()

            outputs_np = outputs.cpu().detach().numpy()
            for i in range(pts.size(0)):
                # restrict predictions to the part labels of this shape's category
                object_label = labels[i]
                part_start, part_end = ds.category_range[object_label]

                outputs_np[i, :part_start] = -1e7
                outputs_np[i, part_end:] = -1e7

            output_np = np.argmax(outputs_np, axis=1).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(
                target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES))
            )
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])
            iou = "{:.3f}".format(metrics.stats_iou_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa, IOU=iou)

        # eval (this is not the final evaluation; see the dedicated evaluation script)
        net.eval()
        with torch.no_grad():
            cm = np.zeros((N_CLASSES, N_CLASSES))
            t = tqdm(
                test_loader,
                ncols=120,
                desc=f"Test {epoch}",
                disable=_config["misc"]["disable_tqdm"],
            )
            for data in t:

                pts, features, seg, labels, net_ids, net_pts = get_data(data)

                outputs = net(features, pts, support_points=net_pts, indices=net_ids)
                loss = 0

                for i in range(pts.size(0)):
                    # get the part label range for this shape's category
                    object_label = labels[i]
                    part_start, part_end = ds_test.category_range[object_label]

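                    # loss restricted to this category's part range, weighted by the category weight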
                    outputs_ = (outputs[i, part_start:part_end]).unsqueeze(0)
                    seg_ = (seg[i] - part_start).unsqueeze(0)

                    loss = loss + weights[object_label] * F.cross_entropy(
                        outputs_, seg_
                    )

                outputs_np = outputs.cpu().detach().numpy()
                for i in range(pts.size(0)):
                    # restrict predictions to the part labels of this shape's category
                    object_label = labels[i]
                    part_start, part_end = ds_test.category_range[object_label]

                    outputs_np[i, :part_start] = -1e7
                    outputs_np[i, part_end:] = -1e7

                output_np = np.argmax(outputs_np, axis=1).copy()
                target_np = seg.cpu().numpy().copy()

                cm_ = confusion_matrix(
                    target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES))
                )
                cm += cm_

                oa_test = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
                aa_test = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])
                iou_test = "{:.3f}".format(metrics.stats_iou_per_class(cm)[0])

                t.set_postfix(OA=oa_test, AA=aa_test, IOU=iou_test)

        # scheduler update
        scheduler.step()

        # save the model
        os.makedirs(savedir_root, exist_ok=True)
        torch.save(
            {
                "epoch": epoch + 1,
                "state_dict": net.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            os.path.join(savedir_root, "checkpoint.pth"),
        )

        # write the logs
        logs = open(os.path.join(savedir_root, "log.txt"), "a+")
        logs.write(f"{epoch} {oa} {aa} {iou} {oa_test} {aa_test} {iou_test} \n")
        logs.close()

        _run.log_scalar("trainOA", oa, epoch)
        _run.log_scalar("trainAA", aa, epoch)
        _run.log_scalar("trainIoU", iou, epoch)
        _run.log_scalar("testOA", oa_test, epoch)
        _run.log_scalar("testAA", aa_test, epoch)
        _run.log_scalar("testIoU", iou_test, epoch)

    logs.close()
Example #3
def main(_run, _config):

    print(_config)
    savedir_root = _config["training"]["savedir"]
    device = torch.device(_config["misc"]["device"])

    # save the config file in the directory to restore the configuration
    os.makedirs(savedir_root, exist_ok=True)
    save_config_file(eval(str(_config)), os.path.join(savedir_root, "config.yaml"))

    # parameters for training
    N_LABELS = 40
    input_channels = 1

    print("Creating network...", end="", flush=True)

    def network_function():
        return get_network(
            _config["network"]["model"],
            input_channels,
            N_LABELS,
            _config["network"]["backend_conv"],
            _config["network"]["backend_search"],
        )

    net = network_function()
    net.to(device)
    print("Number of parameters", count_parameters(net))

    print("get the data path...", end="", flush=True)
    rootdir = os.path.join(_config["dataset"]["dir"])
    print("done")

    training_transformations = [
        lcp_transfo.UnitBallNormalize(),
        lcp_transfo.RandomSubSample(_config["dataset"]["npoints"]),
        lcp_transfo.NormalPerturbation(sigma=0.01)
    ]
    test_transformations = [
        lcp_transfo.UnitBallNormalize(),
        lcp_transfo.RandomSubSample(_config["dataset"]["npoints"]),
    ]


    print("Creating dataloaders...", end="", flush=True)
    if _config['dataset']['name'] == "Modelnet40_normal_resampled":
        Dataset = Modelnet40_normal_resampled
    elif _config['dataset']['name'] == "Modelnet40_ply_hdf5_2048":
        Dataset = Modelnet40_ply_hdf5_2048
    ds = Dataset(
        rootdir,
        split='training',
        network_function=network_function,
        transformations_points=training_transformations,
    )
    train_loader = torch.utils.data.DataLoader(
        ds,
        batch_size=_config["training"]["batchsize"],
        shuffle=True,
        num_workers=_config["misc"]["threads"],
    )
    ds_test = Dataset(
        rootdir,
        split='test',
        network_function=network_function,
        transformations_points=test_transformations,
    )
    test_loader = torch.utils.data.DataLoader(
        ds_test,
        batch_size=_config["training"]["batchsize"],
        shuffle=False,
        num_workers=_config["misc"]["threads"],
    )
    print("done")

    print("Creating optimizer...", end="")
    optimizer = torch.optim.Adam(net.parameters(), lr=_config["training"]["lr_start"])
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, _config["training"]["milestones"], gamma=0.5
    )
    print("done")

    def get_data(data):
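        # unpack a batch and move points, features, targets and the per-layer indices/support points to the device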
        
        pts = data["pts"]
        features = data["features"]
        targets = data["target"]
        net_ids = data["net_indices"]
        net_support = data["net_support"]

        features = features.to(device)
        pts = pts.to(device)
        targets = targets.to(device)
        for i in range(len(net_ids)):
            net_ids[i] = net_ids[i].to(device)
        for i in range(len(net_support)):
            net_support[i] = net_support[i].to(device)

        return pts, features, targets, net_ids, net_support

    for epoch in range(_config["training"]["epoch_nbr"]):

        net.train()
        error = 0
        cm = np.zeros((N_LABELS, N_LABELS))

        train_aloss = "0"
        train_oa = "0"
        train_aa = "0"
        train_aiou = "0"

        t = tqdm(
            train_loader,
            desc="Epoch " + str(epoch),
            ncols=130,
            disable=_config["misc"]["disable_tqdm"],
        )
        for data in t:

            pts, features, targets, net_ids, net_support = get_data(data)

            optimizer.zero_grad()
            outputs = net(features, pts, support_points=net_support, indices=net_ids)
            loss = F.cross_entropy(outputs, targets)
            loss.backward()
            optimizer.step()

            # compute scores
            output_np = np.argmax(outputs.cpu().detach().numpy(), axis=1)
            target_np = targets.cpu().numpy()
            cm_ = confusion_matrix(
                target_np.ravel(), output_np.ravel(), labels=list(range(N_LABELS))
            )
            cm += cm_
            error += loss.item()

            # point wise scores on training
            train_oa = "{:.5f}".format(metrics.stats_overall_accuracy(cm))
            train_aa = "{:.5f}".format(metrics.stats_accuracy_per_class(cm)[0])
            train_aiou = "{:.5f}".format(metrics.stats_iou_per_class(cm)[0])
            train_aloss = "{:.5e}".format(error / cm.sum())

            t.set_postfix(OA=train_oa, AA=train_aa, AIOU=train_aiou, ALoss=train_aloss)

        net.eval()
        error = 0
        cm = np.zeros((N_LABELS, N_LABELS))
        test_aloss = "0"
        test_oa = "0"
        test_aa = "0"
        test_aiou = "0"
        with torch.no_grad():

            t = tqdm(
                test_loader,
                desc="  Test " + str(epoch),
                ncols=100,
                disable=_config["misc"]["disable_tqdm"],
            )
            for data in t:

                pts, features, targets, net_ids, net_support = get_data(data)

                outputs = net(
                    features, pts, support_points=net_support, indices=net_ids
                )
                loss = F.cross_entropy(outputs, targets)

                outputs_np = outputs.cpu().detach().numpy()
                pred_labels = np.argmax(outputs_np, axis=1)
                cm_ = confusion_matrix(
                    targets.cpu().numpy(), pred_labels, labels=list(range(N_LABELS))
                )
                cm += cm_
                error += loss.item()

                # point-wise scores on testing
                test_oa = "{:.5f}".format(metrics.stats_overall_accuracy(cm))
                test_aa = "{:.5f}".format(metrics.stats_accuracy_per_class(cm)[0])
                test_aiou = "{:.5f}".format(metrics.stats_iou_per_class(cm)[0])
                test_aloss = "{:.5e}".format(error / cm.sum())

                t.set_postfix(OA=test_oa, AA=test_aa, AIOU=test_aiou, ALoss=test_aloss)

        scheduler.step()

        # create the root folder
        os.makedirs(savedir_root, exist_ok=True)

        # save the checkpoint
        torch.save(
            {
                "epoch": epoch + 1,
                "state_dict": net.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            os.path.join(savedir_root, "checkpoint.pth"),
        )

        # write the logs
        logs = open(os.path.join(savedir_root, "logs.txt"), "a+")
        logs.write(str(epoch) + " ")
        logs.write(train_aloss + " ")
        logs.write(train_oa + " ")
        logs.write(train_aa + " ")
        logs.write(train_aiou + " ")
        logs.write(test_aloss + " ")
        logs.write(test_oa + " ")
        logs.write(test_aa + " ")
        logs.write(test_aiou + "\n")
        logs.flush()
        logs.close()

        # log for Sacred
        _run.log_scalar("trainOA", train_oa, epoch)
        _run.log_scalar("trainAA", train_aa, epoch)
        _run.log_scalar("trainAIoU", train_aiou, epoch)
        _run.log_scalar("trainLoss", train_aloss, epoch)
        _run.log_scalar("testOA", test_oa, epoch)
        _run.log_scalar("testAA", test_aa, epoch)
        _run.log_scalar("testAIoU", test_aiou, epoch)
        _run.log_scalar("testLoss", test_aloss, epoch)
Example #4
def main(_config):

    print(_config)

    savedir_root = _config["training"]["savedir"]
    device = torch.device(_config["misc"]["device"])

    print("get the data path...", end="", flush=True)
    rootdir = _config["dataset"]["dir"]
    print("done")

    N_CLASSES = 50

    print("Creating network...", end="", flush=True)

    def network_function():
        return Network(
            1,
            N_CLASSES,
            get_conv(_config["network"]["backend_conv"]),
            get_search(_config["network"]["backend_search"]),
        )

    net = network_function()
    net.load_state_dict(
        torch.load(os.path.join(savedir_root, "checkpoint.pth"),
                   map_location=device)["state_dict"])
    net.to(device)
    net.eval()
    print("Done")

    print("Creating dataloader...", end="", flush=True)
    test_transformations = [
        lcp_transfo.UnitBallNormalize(),
        lcp_transfo.RandomSubSample(_config["dataset"]["npoints"]),
    ]
    ds_test = Dataset(
        rootdir,
        'test',
        network_function=network_function,
        transformations_points=test_transformations,
        # iter_per_shape=_config["test"]["num_iter_per_shape"]
        iter_per_shape=1)
    test_loader = torch.utils.data.DataLoader(
        ds_test,
        batch_size=_config["test"]["batchsize"],
        shuffle=False,
        num_workers=_config["misc"]["threads"],
    )
    print("Done")

    # per shape results
    results = torch.zeros(ds_test.data.shape[0], ds_test.data.shape[1],
                          N_CLASSES)
    results_count = torch.zeros(ds_test.data.shape[0], ds_test.data.shape[1])

    with torch.no_grad():
        cm = np.zeros((N_CLASSES, N_CLASSES))

        t = tqdm(test_loader, ncols=100, desc="Inference")
        for data in t:

            pts = data["pts"].to(device)
            features = data["features"].to(device)
            seg = data["seg"].to(device)
            choices = data["choice"]
            labels = data["label"]
            indices = data["index"]
            net_ids = data["net_indices"]
            net_pts = data["net_support"]
            for i in range(len(net_ids)):
                net_ids[i] = net_ids[i].to(device)
            for i in range(len(net_pts)):
                net_pts[i] = net_pts[i].to(device)

            outputs = net(features,
                          pts,
                          support_points=net_pts,
                          indices=net_ids)

            outputs = outputs.to(torch.device("cpu"))
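            # accumulate the point logits into the per-shape score buffer and flag the points covered by this pass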
            for b_id in range(outputs.shape[0]):

                # restrict predictions to the part labels of this shape's category
                object_label = labels[b_id]
                part_start, part_end = ds_test.category_range[object_label]
                outputs[b_id, :part_start] = -1e7
                outputs[b_id, part_end:] = -1e7

                shape_id = indices[b_id]
                choice = choices[b_id]

                results_shape = results[shape_id]
                results_shape[choice] += outputs[b_id].transpose(0, 1)
                results[shape_id] = results_shape

                results_count_shape = results_count[shape_id]
                results_count_shape[choice] = 1
                results_count[shape_id] = results_count_shape

            output_np = outputs.cpu().numpy()
            output_np = np.argmax(output_np, axis=1).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

    Confs = []
    for s_id in tqdm(range(ds_test.size()), ncols=100, desc="Conf. matrices"):

        shape_label = ds_test.labels_shape[s_id]
        # get the number of points
        npts = ds_test.data_num[s_id]

        # get the gt and estimate the number of parts
        label_gt = ds_test.labels_pts[s_id, :npts]
        part_start, part_end = ds_test.category_range[shape_label]
        label_gt -= part_start

        # get the results
        res_shape = results[s_id, :npts, part_start:part_end]

        # extend results to unseen points
        mask = results_count[s_id, :npts].cpu().numpy() == 1
        if np.logical_not(mask).sum() > 0:
            res_shape_mask = res_shape[mask]
            pts_src = torch.from_numpy(
                ds_test.data[s_id, :npts][mask]).transpose(0, 1)
            pts_dest = ds_test.data[s_id, :npts]
            pts_dest = pts_dest[np.logical_not(mask)]
            pts_dest = torch.from_numpy(pts_dest).transpose(0, 1)
            res_shape_unseen = nearest_correspondance(pts_src,
                                                      pts_dest,
                                                      res_shape_mask.transpose(
                                                          0, 1),
                                                      K=1).transpose(0, 1)
            res_shape[np.logical_not(mask)] = res_shape_unseen

        res_shape = res_shape.numpy()

        label_pred = np.argmax(res_shape, axis=1)
        cm_shape = confusion_matrix(label_gt,
                                    label_pred,
                                    labels=list(range(part_end - part_start)))
        Confs.append(cm_shape)

    # compute IoU per shape
    print("Computing IoUs...", end="", flush=True)
    IoUs_per_shape = []
    for i in range(ds_test.labels_shape.shape[0]):
        IoUs_per_shape.append(metrics.stats_iou_per_class(Confs[i])[0])
    IoUs_per_shape = np.array(IoUs_per_shape)

    # compute object category average
    obj_IoUs = np.zeros(len(ds_test.label_names))
    for i in range(len(ds_test.label_names)):
        obj_IoUs[i] = IoUs_per_shape[ds_test.labels_shape == i].mean()
    print("Done")

    print("Objs | Inst | Air  Bag  Cap  Car  Cha  Ear  Gui  "
          "Kni  Lam  Lap  Mot  Mug  Pis  Roc  Ska  Tab")
    print("-----|------|-------------------------------"
          "-------------------------------------------------")
    s = "{:3.1f} | {:3.1f} | ".format(100 * obj_IoUs.mean(),
                                      100 * np.mean(IoUs_per_shape))
    for AmIoU in obj_IoUs:
        s += "{:3.1f} ".format(100 * AmIoU)
    print(s + "\n")
Example #5
def main(_run, _config):

    print(_config)

    savedir_root = _config["training"]["savedir"]
    device = torch.device(_config["misc"]["device"])

    # save the config file
    os.makedirs(savedir_root, exist_ok=True)
    save_config_file(eval(str(_config)), os.path.join(savedir_root, "config.yaml"))

    print("get the data path...", end="", flush=True)
    rootdir = os.path.join(_config["dataset"]["datasetdir"], _config["dataset"]["dataset"])
    print("done")

    filelist_train = os.path.join(rootdir, "train_files.txt")
    filelist_val = os.path.join(rootdir, "val_files.txt")
    filelist_test = os.path.join(rootdir, "test_files.txt")

    N_CLASSES = 50

    shapenet_labels = [
        ["Airplane", 4],
        ["Bag", 2],
        ["Cap", 2],
        ["Car", 4],
        ["Chair", 4],
        ["Earphone", 3],
        ["Guitar", 3],
        ["Knife", 2],
        ["Lamp", 4],
        ["Laptop", 2],
        ["Motorbike", 6],
        ["Mug", 2],
        ["Pistol", 3],
        ["Rocket", 3],
        ["Skateboard", 3],
        ["Table", 3],
    ]
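    # category_range[c] is the [part_start, part_end) range of part labels for shape category c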
    category_range = []
    count = 0
    for element in shapenet_labels:
        part_start = count
        count += element[1]
        part_end = count
        category_range.append([part_start, part_end])

    # Prepare inputs
    print("Preparing datasets...", end="", flush=True)
    (
        data_train,
        labels_shape_train,
        data_num_train,
        labels_pts_train,
        _,
    ) = data_utils.load_seg(filelist_train)
    data_val, labels_shape_val, data_num_val, labels_pts_val, _ = data_utils.load_seg(
        filelist_val
    )
    (
        data_test,
        labels_shape_test,
        data_num_test,
        labels_pts_test,
        _,
    ) = data_utils.load_seg(filelist_test)
    data_train = np.concatenate([data_train, data_val], axis=0)
    labels_shape_train = np.concatenate([labels_shape_train, labels_shape_val], axis=0)
    data_num_train = np.concatenate([data_num_train, data_num_val], axis=0)
    labels_pts_train = np.concatenate([labels_pts_train, labels_pts_val], axis=0)
    print("Done", data_train.shape)

    # define weights
    print("Computing weights...", end="", flush=True)
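    # inverse-frequency category weights: shape count per category divided by its part count, inverted around the mean, then repeated so each part label gets its category's weight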
    frequences = [0 for i in range(len(shapenet_labels))]
    for i in range(len(shapenet_labels)):
        frequences[i] += (labels_shape_train == i).sum()
    for i in range(len(shapenet_labels)):
        frequences[i] /= shapenet_labels[i][1]
    frequences = np.array(frequences)
    frequences = frequences.mean() / frequences
    repeat_factor = [sh[1] for sh in shapenet_labels]
    frequences = np.repeat(frequences, repeat_factor)
    weights = torch.from_numpy(frequences).float().to(device)
    print("Done")

    print("Creating network...", end="", flush=True)

    def network_function():
        return get_network(
            _config["network"]["model"],
            in_channels=1,
            out_channels=N_CLASSES,
            backend_conv=_config["network"]["backend_conv"],
            backend_search=_config["network"]["backend_search"],
        )

    net = network_function()
    net.to(device)
    network_parameters = count_parameters(net)
    print("parameters", network_parameters)

    print("Creating dataloader...", end="", flush=True)
    ds = Dataset(
        data_train,
        data_num_train,
        labels_pts_train,
        labels_shape_train,
        npoints=_config["dataset"]["npoints"],
        training=True,
        network_function=network_function,
    )
    train_loader = torch.utils.data.DataLoader(
        ds,
        batch_size=_config["training"]["batchsize"],
        shuffle=True,
        num_workers=_config["misc"]["threads"],
    )
    ds_test = Dataset(
        data_test,
        data_num_test,
        labels_pts_test,
        labels_shape_test,
        npoints=_config["dataset"]["npoints"],
        training=False,
        network_function=network_function,
    )
    test_loader = torch.utils.data.DataLoader(
        ds_test,
        batch_size=_config["training"]["batchsize"],
        shuffle=False,
        num_workers=_config["misc"]["threads"],
    )
    print("Done")

    print("Creating optimizer...", end="", flush=True)
    optimizer = torch.optim.Adam(net.parameters(), lr=_config["training"]["lr_start"], eps=1e-3)
    epoch_start = 0
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        _config["training"]["milestones"],
        gamma=_config["training"]["gamma"],
        last_epoch=epoch_start - 1,
    )
    print("Done")

    # training loop
    for epoch in range(epoch_start, _config["training"]["epoch_nbr"]):

        # train
        net.train()
        cm = np.zeros((N_CLASSES, N_CLASSES))
        t = tqdm(
            train_loader,
            ncols=120,
            desc=f"Epoch {epoch}",
            disable=_config["misc"]["disable_tqdm"],
        )
        for data in t:

            pts = data["pts"].to(device)
            features = data["features"].to(device)
            seg = data["seg"].to(device)
            labels = data["label"]
            net_ids = data["net_indices"]
            net_pts = data["net_support"]
            for i in range(len(net_ids)):
                net_ids[i] = net_ids[i].to(device)
            for i in range(len(net_pts)):
                net_pts[i] = net_pts[i].to(device)

            optimizer.zero_grad()
            outputs = net(features, pts, support_points=net_pts, indices=net_ids)
            loss = F.cross_entropy(outputs, seg, weight=weights)

            loss.backward()
            optimizer.step()

            outputs_np = outputs.cpu().detach().numpy()
            for i in range(pts.size(0)):
                # restrict predictions to the part labels of this shape's category
                object_label = labels[i]
                part_start, part_end = category_range[object_label]

                outputs_np[i, :part_start] = -1e7
                outputs_np[i, part_end:] = -1e7

            output_np = np.argmax(outputs_np, axis=1).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(
                target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES))
            )
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])
            iou = "{:.3f}".format(metrics.stats_iou_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa, IOU=iou)

        # eval (this is not the final evaluation; see the dedicated evaluation script)
        net.eval()
        with torch.no_grad():
            cm = np.zeros((N_CLASSES, N_CLASSES))
            t = tqdm(
                test_loader,
                ncols=120,
                desc=f"Test {epoch}",
                disable=_config["misc"]["disable_tqdm"],
            )
            for data in t:
                pts = data["pts"].to(device)
                features = data["features"].to(device)
                seg = data["seg"].to(device)
                labels = data["label"]
                net_ids = data["net_indices"]
                net_pts = data["net_support"]
                for i in range(len(net_ids)):
                    net_ids[i] = net_ids[i].to(device)
                for i in range(len(net_pts)):
                    net_pts[i] = net_pts[i].to(device)

                outputs = net(features, pts, support_points=net_pts, indices=net_ids)
                loss = 0

                for i in range(pts.size(0)):
                    # get the part label range for this shape's category
                    object_label = labels[i]
                    part_start, part_end = category_range[object_label]

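                    # loss restricted to this category's part range, weighted by the category weight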
                    outputs_ = (outputs[i, part_start:part_end]).unsqueeze(0)
                    seg_ = (seg[i] - part_start).unsqueeze(0)

                    loss = loss + weights[object_label] * F.cross_entropy(
                        outputs_, seg_
                    )

                outputs_np = outputs.cpu().detach().numpy()
                for i in range(pts.size(0)):
                    # restrict predictions to the part labels of this shape's category
                    object_label = labels[i]
                    part_start, part_end = category_range[object_label]

                    outputs_np[i, :part_start] = -1e7
                    outputs_np[i, part_end:] = -1e7

                output_np = np.argmax(outputs_np, axis=1).copy()
                target_np = seg.cpu().numpy().copy()

                cm_ = confusion_matrix(
                    target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES))
                )
                cm += cm_

                oa_test = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
                aa_test = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])
                iou_test = "{:.3f}".format(metrics.stats_iou_per_class(cm)[0])

                t.set_postfix(OA=oa_test, AA=aa_test, IOU=iou_test)

        # scheduler update
        scheduler.step()

        # save the model
        os.makedirs(savedir_root, exist_ok=True)
        torch.save(
            {
                "epoch": epoch + 1,
                "state_dict": net.state_dict(),
                "optimizer": optimizer.state_dict(),
            },
            os.path.join(savedir_root, "checkpoint.pth"),
        )

        # write the logs
        logs = open(os.path.join(savedir_root, "log.txt"), "a+")
        logs.write(f"{epoch} {oa} {aa} {iou} {oa_test} {aa_test} {iou_test} \n")
        logs.close()

        _run.log_scalar("trainOA", oa, epoch)
        _run.log_scalar("trainAA", aa, epoch)
        _run.log_scalar("trainIoU", iou, epoch)
        _run.log_scalar("testOA", oa_test, epoch)
        _run.log_scalar("testAA", aa_test, epoch)
        _run.log_scalar("testIoU", iou_test, epoch)

    logs.close()
Example #6
def main(_config):

    print(_config)

    savedir_root = _config['training']['savedir']
    device = torch.device(_config['misc']['device'])
    rootdir = _config['dataset']['dir']

    N_CLASSES = 13

    # create the network
    print("Creating the network...", end="", flush=True)
    if "Fusion" == _config["network"]["model"]:

        def network_function():
            return NetworkFusion(3,
                                 N_CLASSES,
                                 get_conv(_config["network"]["backend_conv"]),
                                 get_search(
                                     _config["network"]["backend_search"]),
                                 config=_config)
    else:

        def network_function():
            return Network(3,
                           N_CLASSES,
                           get_conv(_config["network"]["backend_conv"]),
                           get_search(_config["network"]["backend_search"]),
                           config=_config)

    net = network_function()
    net.load_state_dict(
        torch.load(os.path.join(savedir_root, "checkpoint.pth"))["state_dict"])
    net.to(device)
    net.eval()
    print("Done")

    validation_transformations_data = []
    validation_transformations_features = []

    if not _config['training']['rgb']:
        validation_transformations_features.append(lcp_transfo.NoColor())

    ds = Dataset(rootdir,
                 _config,
                 split='test',
                 network_function=network_function,
                 transformations_data=validation_transformations_data,
                 transformations_features=validation_transformations_features)

    # create the global confusion matrix
    cm_global = np.zeros((N_CLASSES, N_CLASSES))

    for file_id in range(ds.size()):

        ds.compute_sliding_window(file_id, _config["test"]["step"],
                                  _config["dataset"]["num_points"])

        test_loader = torch.utils.data.DataLoader(
            ds,
            batch_size=_config['test']['batch_size'],
            shuffle=False,
            num_workers=_config['misc']['threads'])

        filename = ds.filelist[file_id]

        # create a score accumulator
        scores = np.zeros((ds.get_points().shape[0], N_CLASSES))

        # iterate over the dataloader
        t = tqdm(test_loader, ncols=100, desc=filename)
        with torch.no_grad():
            for data in t:

                pts = data['pts'].to(device)
                features = data['features'].to(device)
                pts_ids = data['pts_ids']
                net_ids = data["net_indices"]
                net_pts = data["net_support"]
                for i in range(len(net_ids)):
                    net_ids[i] = net_ids[i].to(device)
                for i in range(len(net_pts)):
                    net_pts[i] = net_pts[i].to(device)

                outputs = net(features,
                              pts,
                              indices=net_ids,
                              support_points=net_pts)
                outputs_np = outputs.transpose(
                    1, 2).cpu().detach().numpy().reshape((-1, N_CLASSES))
                scores[pts_ids.numpy().ravel()] += outputs_np

        # get the original points
        original_points = ds.get_points()
        original_labels = ds.get_labels()

        # mask = np.logical_and((np.abs(scores).sum(1) > 0), np.argmax(scores, axis=1) == np.argmax(scores_noc, axis=1))

        # compute the mask of points seen at prediction time
        mask = (np.abs(scores).sum(1) > 0)
        seen_scores = scores[mask]
        seen_points = original_points[mask]

        # project the scores on the original points
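        # each original point takes the score of its nearest seen point (K=1 nearest neighbor)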
        scores = nearest_correspondance(
            torch.from_numpy(seen_points).float().transpose(0, 1),
            torch.from_numpy(original_points).float().transpose(0, 1),
            torch.from_numpy(seen_scores).float().transpose(0, 1),
            K=1).transpose(0, 1).numpy()
        original_preds = np.argmax(scores, axis=1)

        # confusion matrix
        cm = confusion_matrix(original_labels,
                              original_preds,
                              labels=list(range(N_CLASSES)))
        cm_global += cm

        print("IoU", metrics.stats_iou_per_class(cm)[0])

        # saving results
        savedir_results = os.path.join(
            savedir_root, f"results_step{_config['test']['step']}", filename)

        # saving labels
        if _config['test']['savepreds']:
            os.makedirs(savedir_results, exist_ok=True)
            np.savetxt(os.path.join(savedir_results, "pred.txt"),
                       original_preds,
                       fmt='%d')

        if _config['test']['savepts']:
            os.makedirs(savedir_results, exist_ok=True)
            original_preds = np.expand_dims(original_preds, 1).astype(int)
            original_points = ds.get_points()
            original_points = np.concatenate([original_points, original_preds],
                                             axis=1)
            np.savetxt(os.path.join(savedir_results, "pts.txt"),
                       original_points,
                       fmt=['%.4f', '%.4f', '%.4f', '%d', '%d', '%d', '%d'])

    print(
        "WARNING: the following scores need to be checked against the "
        "evaluation script from the ConvPoint repository"
    )
    print("TODO: verify consistency with the evaluation script")
    iou = metrics.stats_iou_per_class(cm_global)
    print("Global IoU")
    print(iou[0])
    print("Global IoU per class")
    print(iou[1])

    print(f"{iou[0]} ", end="", flush=True)
    for i in range(iou[1].shape[0]):
        print(f"{iou[1][i]} ", end="")
    print("")
Example #7
def main(_config):

    print(_config)

    savedir_root = _config["savedir"]
    device = torch.device(_config["device"])

    print("get the data path...", end="", flush=True)
    rootdir = os.path.join(_config["datasetdir"], _config["dataset"])
    print("done")

    filelist_test = os.path.join(rootdir, "test_files.txt")

    N_CLASSES = 50

    shapenet_labels = [
        ["Airplane", 4],
        ["Bag", 2],
        ["Cap", 2],
        ["Car", 4],
        ["Chair", 4],
        ["Earphone", 3],
        ["Guitar", 3],
        ["Knife", 2],
        ["Lamp", 4],
        ["Laptop", 2],
        ["Motorbike", 6],
        ["Mug", 2],
        ["Pistol", 3],
        ["Rocket", 3],
        ["Skateboard", 3],
        ["Table", 3],
    ]
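    # category_range[c] is the [part_start, part_end) range of part labels for shape category c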
    category_range = []
    count = 0
    for element in shapenet_labels:
        part_start = count
        count += element[1]
        part_end = count
        category_range.append([part_start, part_end])

    # Prepare inputs
    print("Preparing datasets...", end="", flush=True)
    (
        data_test,
        labels_shape_test,
        data_num_test,
        labels_pts_test,
        _,
    ) = data_utils.load_seg(filelist_test)
    print("Done", data_test.shape)

    print("Creating network...", end="", flush=True)

    def network_function():
        return get_network(
            _config["model"],
            in_channels=1,
            out_channels=N_CLASSES,
            ConvNet_name=_config["backend_conv"],
            Search_name=_config["backend_search"],
        )

    net = network_function()
    net.load_state_dict(
        torch.load(os.path.join(savedir_root, "checkpoint.pth"),
                   map_location=device)["state_dict"])
    net.to(device)
    print("Done")

    print("Creating dataloader...", end="", flush=True)
    ds_test = Dataset(
        data_test,
        data_num_test,
        labels_pts_test,
        labels_shape_test,
        npoints=_config["npoints"],
        training=False,
        network_function=network_function,
        num_iter_per_shape=_config["num_iter_per_shape"],
    )
    test_loader = torch.utils.data.DataLoader(
        ds_test,
        batch_size=_config["batchsize"],
        shuffle=False,
        num_workers=_config["threads"],
    )
    print("Done")

    # per shape results
    results = torch.zeros(data_test.shape[0], data_test.shape[1], N_CLASSES)
    results_count = torch.zeros(data_test.shape[0], data_test.shape[1])

    with torch.no_grad():
        cm = np.zeros((N_CLASSES, N_CLASSES))

        t = tqdm(test_loader, ncols=100, desc="Inference")
        for data in t:

            pts = data["pts"].to(device)
            features = data["features"].to(device)
            seg = data["seg"].to(device)
            choices = data["choice"]
            labels = data["label"]
            indices = data["index"]
            net_ids = data["net_indices"]
            net_pts = data["net_support"]
            for i in range(len(net_ids)):
                net_ids[i] = net_ids[i].to(device)
            for i in range(len(net_pts)):
                net_pts[i] = net_pts[i].to(device)

            outputs = net(features,
                          pts,
                          support_points=net_pts,
                          indices=net_ids)

            outputs = outputs.to(torch.device("cpu"))
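            # accumulate the point logits into the per-shape score buffer and flag the points covered by this pass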
            for b_id in range(outputs.shape[0]):

                # restrict predictions to the part labels of this shape's category
                object_label = labels[b_id]
                part_start, part_end = category_range[object_label]
                outputs[b_id, :part_start] = -1e7
                outputs[b_id, part_end:] = -1e7

                shape_id = indices[b_id]
                choice = choices[b_id]

                results_shape = results[shape_id]
                results_shape[choice] += outputs[b_id].transpose(0, 1)
                results[shape_id] = results_shape

                results_count_shape = results_count[shape_id]
                results_count_shape[choice] = 1
                results_count[shape_id] = results_count_shape

            output_np = outputs.cpu().numpy()
            output_np = np.argmax(output_np, axis=1).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

    Confs = []
    for s_id in tqdm(range(data_test.shape[0]),
                     ncols=100,
                     desc="Conf. matrices"):

        shape_label = labels_shape_test[s_id]
        # get the number of points
        npts = data_num_test[s_id]

        # get the gt and estimate the number of parts
        label_gt = labels_pts_test[s_id, :npts]
        part_start, part_end = category_range[shape_label]
        label_gt -= part_start

        # get the results
        res_shape = results[s_id, :npts, part_start:part_end]

        # extend results to unseen points
        mask = results_count[s_id, :npts].cpu().numpy() == 1
        if np.logical_not(mask).sum() > 0:
            res_shape_mask = res_shape[mask]
            pts_src = torch.from_numpy(data_test[s_id, :npts][mask]).transpose(
                0, 1)
            pts_dest = data_test[s_id, :npts]
            pts_dest = pts_dest[np.logical_not(mask)]
            pts_dest = torch.from_numpy(pts_dest).transpose(0, 1)
            res_shape_unseen = nearest_correspondance(pts_src,
                                                      pts_dest,
                                                      res_shape_mask.transpose(
                                                          0, 1),
                                                      K=1).transpose(0, 1)
            res_shape[np.logical_not(mask)] = res_shape_unseen

        res_shape = res_shape.numpy()

        label_pred = np.argmax(res_shape, axis=1)
        cm_shape = confusion_matrix(label_gt,
                                    label_pred,
                                    labels=list(range(part_end - part_start)))
        Confs.append(cm_shape)

    # compute IoU per shape
    print("Computing IoUs...", end="", flush=True)
    IoUs_per_shape = []
    for i in range(labels_shape_test.shape[0]):
        IoUs_per_shape.append(metrics.stats_iou_per_class(Confs[i])[0])
    IoUs_per_shape = np.array(IoUs_per_shape)

    # compute object category average
    obj_IoUs = np.zeros(len(shapenet_labels))
    for i in range(len(shapenet_labels)):
        obj_IoUs[i] = IoUs_per_shape[labels_shape_test == i].mean()
    print("Done")

    print("Objs | Inst | Air  Bag  Cap  Car  Cha  Ear  Gui  "
          "Kni  Lam  Lap  Mot  Mug  Pis  Roc  Ska  Tab")
    print("-----|------|-------------------------------"
          "-------------------------------------------------")
    s = "{:3.1f} | {:3.1f} | ".format(100 * obj_IoUs.mean(),
                                      100 * np.mean(IoUs_per_shape))
    for AmIoU in obj_IoUs:
        s += "{:3.1f} ".format(100 * AmIoU)
    print(s + "\n")