Example #1
def test(args, flist_test, model_folder, info_class):
    nb_class = info_class['nb_class']
    # create the network
    print("Creating network...")
    net, features = get_model(nb_class, args)
    state = torch.load(model_folder / "state_dict.pth")
    net.load_state_dict(state['state_dict'])
    net.cuda()
    net.eval()
    print(f"Number of parameters in the model: {count_parameters(net):,}")

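    # Sliding-window inference: each test file is split into blocks, the
    # network scores every sampled point, and the per-point scores are
    # accumulated across overlapping blocks.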
    for filename in flist_test:
        print(filename)
        ds_tst = PartDatasetTest(filename,
                                 args.rootdir,
                                 block_size=args.blocksize,
                                 npoints=args.npoints,
                                 test_step=args.test_step,
                                 features=features,
                                 labels=args.test_labels)
        tst_loader = torch.utils.data.DataLoader(ds_tst,
                                                 batch_size=args.batchsize,
                                                 shuffle=False,
                                                 num_workers=args.num_workers)

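        # scores accumulates the summed class logits for every point of the
        # cloud (the first three columns of xyzni are the xyz coordinates)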
        xyz = ds_tst.xyzni[:, :3]
        scores = np.zeros((xyz.shape[0], nb_class))

        total_time = 0
        iter_nb = 0
        with torch.no_grad():
            t = tqdm(tst_loader, ncols=150)
            for pts, feats, indices in t:
                t1 = time.time()
                feats = feats.cuda()
                pts = pts.cuda()
                outputs = net(feats, pts)
                t2 = time.time()

                outputs_np = outputs.cpu().numpy().reshape((-1, nb_class))
                # np.add.at accumulates every occurrence, even when the same
                # point index appears several times within a batch
                np.add.at(scores, indices.cpu().numpy().ravel(), outputs_np)

                iter_nb += 1
                total_time += (t2 - t1)
                t.set_postfix(
                    time=f"{total_time / (iter_nb * args.batchsize):.5e}")

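        # points never sampled by any block still have all-zero scores; drop
        # them, then propagate predictions from the nearest scored point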
        mask = scores.sum(1) != 0
        scores = scores[mask]
        pts_src = xyz[mask]

        # create the scores for all points
        scores = nearest_correspondance(pts_src, xyz, scores, K=1)

        # numerically stable softmax, then hard labels via argmax
        scores = scores - scores.max(axis=1)[:, None]
        scores = np.exp(scores) / np.exp(scores).sum(1)[:, None]
        scores = np.nan_to_num(scores)
        scores = scores.argmax(1)

        # Compute confusion matrix
        if args.test_labels:
            tst_logs = InformationLogger(model_folder, 'tst')
            lbl = ds_tst.labels[:, :]

            cm = confusion_matrix(lbl.ravel(),
                                  scores.ravel(),
                                  labels=list(range(nb_class)))

            cl_acc = metrics.stats_accuracy_per_class(cm)
            cl_iou = metrics.stats_iou_per_class(cm)
            cl_fscore = metrics.stats_f1score_per_class(cm)

            print(f"Stats for test dataset:")
            print_metric('Test', 'Accuracy', cl_acc)
            print_metric('Test', 'IoU', cl_iou)
            print_metric('Test', 'F1-Score', cl_fscore)
            tst_avg_score = {
                'loss': -1,
                'acc': cl_acc[0],
                'iou': cl_iou[0],
                'fscore': cl_fscore[0]
            }
            tst_class_score = {
                'acc': cl_acc[1],
                'iou': cl_iou[1],
                'fscore': cl_fscore[1]
            }
            tst_logs.add_metric_values(tst_avg_score, -1)
            tst_logs.add_class_scores(tst_class_score, -1)

            # write error file.
            # error2ply(model_folder / f"{filename}_error.ply", xyz=xyz, labels=lbl, prediction=scores, info_class=info_class['class_info'])

        if args.savepts:
            # Save predictions
            out_folder = model_folder / 'tst'
            out_folder.mkdir(exist_ok=True)
            prediction2ply(out_folder / f"{filename}_predictions.ply",
                           xyz=xyz,
                           prediction=scores,
                           info_class=info_class['class_info'])
Example #2
def train(args):

    THREADS = 4
    USE_CUDA = True
    N_CLASSES = 50
    EPOCHS = 200
    MILESTONES = [60, 120]

    shapenet_labels = [
        ['Airplane', 4],
        ['Bag', 2],
        ['Cap', 2],
        ['Car', 4],
        ['Chair', 4],
        ['Earphone', 3],
        ['Guitar', 3],
        ['Knife', 2],
        ['Lamp', 4],
        ['Laptop', 2],
        ['Motorbike', 6],
        ['Mug', 2],
        ['Pistol', 3],
        ['Rocket', 3],
        ['Skateboard', 3],
        ['Table', 3],
    ]
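    # build the [start, end) range of part labels for each of the 16
    # categories; the per-shape loss below restricts the softmax to the
    # parts that belong to the shape's own category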
    category_range = []
    count = 0
    for element in shapenet_labels:
        part_start = count
        count += element[1]
        part_end = count
        category_range.append([part_start, part_end])

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    is_list_of_h5_list = not data_utils.is_h5_list(args.filelist)
    if is_list_of_h5_list:
        seg_list = data_utils.load_seg_list(args.filelist)
        seg_list_idx = 0
        filelist_train = seg_list[seg_list_idx]
        seg_list_idx = seg_list_idx + 1
    else:
        filelist_train = args.filelist
    data_train, labels, data_num_train, label_train, _ = data_utils.load_seg(
        filelist_train)
    print("Done", data_train.shape)

    print("Computing class weights (if needed, 1 otherwise)...")
    if args.weighted:
        frequencies = []
        for i in range(len(shapenet_labels)):
            frequencies.append((labels == i).sum())
        frequencies = np.array(frequencies)
        frequencies = frequencies.mean() / frequencies
    else:
        frequencies = [1 for _ in range(len(shapenet_labels))]
    weights = torch.FloatTensor(frequencies)
    if USE_CUDA:
        weights = weights.cuda()
    print("Done")

    print("Creating network...")
    net = get_model(args.model, input_channels=1, output_channels=N_CLASSES)
    net.cuda()
    print("parameters", count_parameters(net))

    ds = PartNormalDataset(data_train,
                           data_num_train,
                           label_train,
                           npoints=args.npoints)
    train_loader = torch.utils.data.DataLoader(ds,
                                               batch_size=args.batchsize,
                                               shuffle=True,
                                               num_workers=THREADS)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, MILESTONES)

    # create the model folder
    time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    root_folder = os.path.join(
        args.savedir,
        "{}_b{}_pts{}_weighted{}_{}".format(args.model, args.batchsize,
                                            args.npoints, args.weighted,
                                            time_string))
    os.makedirs(root_folder, exist_ok=True)

    # create the log file
    logs = open(os.path.join(root_folder, "log.txt"), "w")
    for epoch in range(EPOCHS):
        cm = np.zeros((N_CLASSES, N_CLASSES))
        t = tqdm(train_loader, ncols=120, desc="Epoch {}".format(epoch))
        for pts, features, seg, indices in t:

            if USE_CUDA:
                features = features.cuda()
                pts = pts.cuda()
                seg = seg.cuda()

            optimizer.zero_grad()
            outputs = net(features, pts)

            # loss =  F.cross_entropy(outputs.view(-1, N_CLASSES), seg.view(-1))

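            # per-shape loss: slice the logits to the part range of each
            # shape's category so the softmax only competes among that
            # category's own parts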
            loss = 0
            for i in range(pts.size(0)):
                # get the number of part for the shape
                object_label = labels[indices[i]]
                part_start, part_end = category_range[object_label]
                part_nbr = part_end - part_start
                loss = loss + weights[object_label] * F.cross_entropy(
                    outputs[i, :, part_start:part_end].view(-1, part_nbr),
                    seg[i].view(-1) - part_start)

            loss.backward()
            optimizer.step()

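            # for the confusion matrix, suppress logits outside each shape's
            # part range before taking the argmax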
            outputs_np = outputs.cpu().detach().numpy()
            for i in range(pts.size(0)):
                # get the number of part for the shape
                object_label = labels[indices[i]]
                part_start, part_end = category_range[object_label]
                part_nbr = part_end - part_start
                outputs_np[i, :, :part_start] = -1e7
                outputs_np[i, :, part_end:] = -1e7

            output_np = np.argmax(outputs_np, axis=2).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa)

        # save the model
        torch.save(net.state_dict(), os.path.join(root_folder,
                                                  "state_dict.pth"))

        # write the logs
        logs.write("{} {} {}\n".format(epoch, oa, aa))
        logs.flush()

        # step the LR scheduler once per epoch, after the optimizer updates
        scheduler.step()

    logs.close()
Example #3
def train(args, dataset_dict, info_class):

    nb_class = info_class['nb_class']
    print("Creating network...")
    net, features = get_model(nb_class, args)
    net.cuda()
    print(f"Number of parameters in the model: {count_parameters(net):,}")

    print("Creating dataloader and optimizer...", end="")
    ds_trn = PartDatasetTrainVal(filelist=dataset_dict['trn'],
                                 folder=args.rootdir,
                                 training=True,
                                 block_size=args.blocksize,
                                 npoints=args.npoints,
                                 iteration_number=args.batchsize * args.iter,
                                 features=features,
                                 class_info=info_class['class_info'])
    train_loader = torch.utils.data.DataLoader(ds_trn,
                                               batch_size=args.batchsize,
                                               shuffle=True,
                                               num_workers=args.num_workers)

    ds_val = PartDatasetTrainVal(filelist=dataset_dict['val'],
                                 folder=args.rootdir,
                                 training=False,
                                 block_size=args.blocksize,
                                 npoints=args.npoints,
                                 iteration_number=args.batchsize *
                                 args.val_iter,
                                 features=features,
                                 class_info=info_class['class_info'])
    val_loader = torch.utils.data.DataLoader(ds_val,
                                             batch_size=args.batchsize,
                                             shuffle=False,
                                             num_workers=args.num_workers)

    optimizer = torch.optim.Adam(net.parameters(), lr=float(args.lr))
    print("done")

    # create the root folder
    print("Creating results folder...", end="")
    time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    root_folder = Path(
        f"{args.savedir}/{args.model}_{args.npoints}_drop{args.drop}_{time_string}"
    )
    root_folder.mkdir(parents=True, exist_ok=True)
    args_dict = vars(args)
    args_dict['data'] = dataset_dict
    # write_config(root_folder, args_dict)
    print("done at", root_folder)

    # create the log file
    trn_logs = InformationLogger(root_folder, 'trn')
    val_logs = InformationLogger(root_folder, 'val')

    # iterate over epochs
    for epoch in range(args.nepochs):

        #######
        # training
        net.train()

        train_loss = 0
        cm = np.zeros((nb_class, nb_class))
        t = tqdm(train_loader, ncols=150, desc=f"Epoch {epoch}")
        for pts, features, seg in t:
            features = features.cuda()
            pts = pts.cuda()
            seg = seg.cuda()

            optimizer.zero_grad()
            outputs = net(features, pts)
            loss = F.cross_entropy(outputs.view(-1, nb_class), seg.view(-1))
            loss.backward()
            optimizer.step()

            output_np = np.argmax(outputs.cpu().detach().numpy(),
                                  axis=2).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(nb_class)))
            cm += cm_

            oa = f"{metrics.stats_overall_accuracy(cm):.4f}"
            acc = metrics.stats_accuracy_per_class(cm)
            iou = metrics.stats_iou_per_class(cm)

            train_loss += loss.detach().cpu().item()

            t.set_postfix(OA=wblue(oa),
                          AA=wblue(f"{acc[0]:.4f}"),
                          IOU=wblue(f"{iou[0]:.4f}"),
                          LOSS=wblue(f"{train_loss / cm.sum():.4e}"))
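        # epoch-level metrics come from the confusion matrix accumulated
        # over every training batch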
        fscore = metrics.stats_f1score_per_class(cm)
        trn_metrics_values = {
            'loss': f"{train_loss / cm.sum():.4e}",
            'acc': acc[0],
            'iou': iou[0],
            'fscore': fscore[0]
        }
        trn_class_score = {'acc': acc[1], 'iou': iou[1], 'fscore': fscore[1]}
        trn_logs.add_metric_values(trn_metrics_values, epoch)
        trn_logs.add_class_scores(trn_class_score, epoch)
        print_metric('Training', 'F1-Score', fscore)

        ######
        # validation
        net.eval()
        cm_val = np.zeros((nb_class, nb_class))
        val_loss = 0
        t = tqdm(val_loader,
                 ncols=150,
                 desc=f"  Validation epoch {epoch}")
        with torch.no_grad():
            for pts, features, seg in t:
                features = features.cuda()
                pts = pts.cuda()
                seg = seg.cuda()

                outputs = net(features, pts)
                loss = F.cross_entropy(outputs.view(-1, nb_class),
                                       seg.view(-1))

                output_np = np.argmax(outputs.cpu().detach().numpy(),
                                      axis=2).copy()
                target_np = seg.cpu().numpy().copy()

                cm_ = confusion_matrix(target_np.ravel(),
                                       output_np.ravel(),
                                       labels=list(range(nb_class)))
                cm_val += cm_

                oa_val = f"{metrics.stats_overall_accuracy(cm_val):.4f}"
                acc_val = metrics.stats_accuracy_per_class(cm_val)
                iou_val = metrics.stats_iou_per_class(cm_val)

                val_loss += loss.detach().cpu().item()

                t.set_postfix(OA=wgreen(oa_val),
                              AA=wgreen(f"{acc_val[0]:.4f}"),
                              IOU=wgreen(f"{iou_val[0]:.4f}"),
                              LOSS=wgreen(f"{val_loss / cm_val.sum():.4e}"))

        fscore_val = metrics.stats_f1score_per_class(cm_val)

        # save the model

        state = {
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'args': args
        }
        torch.save(state, root_folder / "state_dict.pth")

        # write the logs
        val_metrics_values = {
            'loss': f"{val_loss / cm_val.sum():.4e}",
            'acc': acc_val[0],
            'iou': iou_val[0],
            'fscore': fscore_val[0]
        }
        val_class_score = {
            'acc': acc_val[1],
            'iou': iou_val[1],
            'fscore': fscore_val[1]
        }

        val_logs.add_metric_values(val_metrics_values, epoch)
        val_logs.add_class_scores(val_class_score, epoch)
        print_metric('Validation', 'F1-Score', fscore_val)

    return root_folder
Example #4
    def apply(epoch, training=False):
        error = 0
        cm = np.zeros((N_LABELS, N_LABELS))

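        # training pass: standard supervised cross-entropy over point labels,
        # with running metrics from the accumulated confusion matrix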
        if training:
            t = tqdm(train_loader, desc="Epoch " + str(epoch), ncols=130)
            for pts, features, targets, indices in t:
                if args.cuda:
                    features = features.cuda()
                    pts = pts.cuda()
                    targets = targets.cuda()

                optimizer.zero_grad()

                outputs = net(features, pts)
                targets = targets.view(-1)
                loss = F.cross_entropy(outputs, targets)

                loss.backward()
                optimizer.step()

                output_np = np.argmax(outputs.cpu().detach().numpy(), axis=1)
                target_np = targets.cpu().numpy()

                cm_ = confusion_matrix(target_np.ravel(),
                                       output_np.ravel(),
                                       labels=list(range(N_LABELS)))
                cm += cm_
                error += loss.item()

                # scores
                oa = "{:.5f}".format(metrics.stats_overall_accuracy(cm))
                aa = "{:.5f}".format(metrics.stats_accuracy_per_class(cm)[0])
                aiou = "{:.5f}".format(metrics.stats_iou_per_class(cm)[0])
                aloss = "{:.5e}".format(error / cm.sum())

                t.set_postfix(OA=oa, AA=aa, AIOU=aiou, ALoss=aloss)

        else:
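            # evaluation pass: sum the logits each sample receives (possibly
            # across several passes when args.ntree > 1) and vote by argmax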
            predictions = np.zeros((test_data.shape[0], N_LABELS), dtype=float)
            t = tqdm(test_loader, desc="  Test " + str(epoch), ncols=100)
            for pts, features, targets, indices in t:
                if args.cuda:
                    features = features.cuda()
                    pts = pts.cuda()
                    targets = targets.cuda()

                outputs = net(features, pts)
                targets = targets.view(-1)
                loss = F.cross_entropy(outputs, targets)

                outputs_np = outputs.cpu().detach().numpy()
                for i in range(indices.size(0)):
                    predictions[indices[i]] += outputs_np[i]
                    # l_ = np.argmax(outputs_np[i])
                    # predictions[indices[i],l_] += 1

                error += loss.item()

                if args.ntree == 1:
                    pred_labels = np.argmax(outputs_np, axis=1)
                    cm_ = confusion_matrix(targets.cpu().numpy(),
                                           pred_labels,
                                           labels=list(range(N_LABELS)))
                    cm += cm_

                    # scores
                    oa = "{:.5f}".format(metrics.stats_overall_accuracy(cm))
                    aa = "{:.5f}".format(
                        metrics.stats_accuracy_per_class(cm)[0])
                    aiou = "{:.5f}".format(metrics.stats_iou_per_class(cm)[0])
                    aloss = "{:.5e}".format(error / cm.sum())

                    t.set_postfix(OA=oa, AA=aa, AIOU=aiou, ALoss=aloss)

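            # final labels from the accumulated scores over all passes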
            predictions = np.argmax(predictions, axis=1)
            cm = confusion_matrix(test_labels,
                                  predictions,
                                  labels=list(range(N_LABELS)))

            oa = "{:.5f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.5f}".format(metrics.stats_accuracy_per_class(cm)[0])
            aiou = "{:.5f}".format(metrics.stats_iou_per_class(cm)[0])
            aloss = "{:.5e}".format(error / cm.sum())

            print("Predictions", "loss", aloss, "OA", oa, "AA", aa, "IOU",
                  aiou)

        return aloss, oa, aa, aiou