Example #1
def train(args):

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    is_list_of_h5_list = not data_utils.is_h5_list(args.filelist)
    if is_list_of_h5_list:
        seg_list = data_utils.load_seg_list(args.filelist)
        seg_list_idx = 0
        filelist_train = seg_list[seg_list_idx]
        seg_list_idx = seg_list_idx + 1
    else:
        filelist_train = args.filelist
    data_train, labels, data_num_train, label_train, _ = data_utils.load_seg(
        filelist_train)
    print("Done", data_train.shape)

    THREADS = 4
    BATCH_SIZE = args.batchsize
    USE_CUDA = True
    N_CLASSES = 50
    EPOCHS = 200
    MILESTONES = [60, 120, 180]

    print("Creating network...")
    net = Net(input_channels=1, output_channels=N_CLASSES)
    net.cuda()
    print("parameters", count_parameters(net))

    ds = PartNormalDataset(data_train,
                           data_num_train,
                           label_train,
                           net.config,
                           npoints=args.npoints,
                           shape_labels=labels)
    train_loader = torch.utils.data.DataLoader(ds,
                                               batch_size=BATCH_SIZE,
                                               shuffle=True,
                                               num_workers=THREADS,
                                               collate_fn=tree_collate)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, MILESTONES)

    # create the model folder
    time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    root_folder = os.path.join(
        args.savedir, "Net_b{}_pts{}_{}".format(args.batchsize, args.npoints,
                                                time_string))
    os.makedirs(root_folder, exist_ok=True)

    # create the log file
    logs = open(os.path.join(root_folder, "log.txt"), "w")
    for epoch in range(EPOCHS):
        cm = np.zeros((N_CLASSES, N_CLASSES))
        t = tqdm(train_loader, ncols=120, desc="Epoch {}".format(epoch))
        for pts, features, seg, tree, _ in t:

            if USE_CUDA:
                features = features.cuda()
                pts = pts.cuda()
                for l_id in range(len(tree)):
                    tree[l_id]["points"] = tree[l_id]["points"].cuda()
                    tree[l_id]["indices"] = tree[l_id]["indices"].cuda()
                seg = seg.cuda()

            optimizer.zero_grad()
            outputs = net(features, pts, tree)
            loss = F.cross_entropy(outputs.view(-1, N_CLASSES), seg.view(-1))
            loss.backward()
            optimizer.step()

            output_np = np.argmax(outputs.cpu().detach().numpy(),
                                  axis=2).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa)

        # step the learning-rate schedule once per epoch, after the
        # optimizer updates for that epoch have run
        scheduler.step()

        # save the model
        torch.save(net.state_dict(), os.path.join(root_folder,
                                                  "state_dict.pth"))

        # write the logs
        logs.write("{} {} {}\n".format(epoch, oa, aa))
        logs.flush()

    logs.close()
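
A minimal sketch of how train might be invoked from the command line; the flag names mirror the attributes the function reads (filelist, batchsize, npoints, savedir), and the default values are illustrative assumptions, not taken from the source.

import argparse

# Hypothetical entry point: only the attributes that train() reads are declared.
parser = argparse.ArgumentParser()
parser.add_argument("--filelist", required=True, help="list of training h5 files")
parser.add_argument("--batchsize", type=int, default=16)
parser.add_argument("--npoints", type=int, default=2500)
parser.add_argument("--savedir", default="results")
train(parser.parse_args())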
Example #2
def test_multiple(args):
    THREADS = 4
    BATCH_SIZE = args.batchsize
    USE_CUDA = True
    N_CLASSES = 50

    args.data_folder = os.path.join(args.rootdir, "test_data")

    # create the output folders
    output_folder = args.savedir + '_predictions_multi_{}'.format(args.ntree)
    with open(args.category, 'r') as f:
        category_list = [(category, int(label_num))
                         for category, label_num in
                         (line.split() for line in f)]
    offset = 0
    category_range = dict()
    for category, category_label_seg_max in category_list:
        category_range[category] = (offset, offset + category_label_seg_max)
        offset = offset + category_label_seg_max
        folder = os.path.join(output_folder, category)
        os.makedirs(folder, exist_ok=True)

    input_filelist = []
    output_filelist = []
    output_ply_filelist = []
    for category in sorted(os.listdir(args.data_folder)):
        data_category_folder = os.path.join(args.data_folder, category)
        for filename in sorted(os.listdir(data_category_folder)):
            input_filelist.append(
                os.path.join(args.data_folder, category, filename))
            output_filelist.append(
                os.path.join(output_folder, category, filename[0:-3] + 'seg'))
            output_ply_filelist.append(
                os.path.join(output_folder + '_ply', category,
                             filename[0:-3] + 'ply'))

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data, label, data_num, label_test, _ = data_utils.load_seg(
        args.filelist_val)  # no segmentation labels

    net = Net(input_channels=1, output_channels=N_CLASSES)
    net.load_state_dict(
        torch.load(os.path.join(args.savedir, "state_dict.pth")))
    net.cuda()
    net.eval()

    ds = PartNormalDataset(data,
                           data_num,
                           label_test,
                           net.config,
                           npoints=args.npoints)

    # Shapes are drawn directly from the dataset below (ds[shape_id]),
    # so no DataLoader is needed; batches are collated manually per shape.
    with torch.no_grad():
        count = 0

        for shape_id in tqdm(range(len(ds)), ncols=120):

            segmentation_ = None

            batches = []

            # split the ntree samplings of this shape into batches of at
            # most batchsize samples each
            for i in range(math.ceil(args.ntree / args.batchsize)):
                bs = min(args.batchsize, args.ntree - i * args.batchsize)
                batches.append([ds[shape_id] for _ in range(bs)])

            for batch in batches:

                pts, features, seg, tree = tree_collate(batch)
                if USE_CUDA:
                    features = features.cuda()
                    pts = pts.cuda()
                    for l_id in range(len(tree)):
                        tree[l_id]["points"] = tree[l_id]["points"].cuda()
                        tree[l_id]["indices"] = tree[l_id]["indices"].cuda()

                outputs = net(features, pts, tree)

                for i in range(pts.size(0)):
                    pts_src = pts[i].cpu().numpy()

                    # pts_dest
                    point_num = data_num[count]
                    pts_dest = data[count]
                    pts_dest = pts_dest[:point_num]

                    object_label = label[count]
                    category = category_list[object_label][0]
                    label_start, label_end = category_range[category]

                    seg_ = outputs[i][:, label_start:label_end].cpu().numpy()
                    seg_ = nearest_correspondance(pts_src, pts_dest, seg_)

                    if segmentation_ is None:
                        segmentation_ = seg_
                    else:
                        segmentation_ += seg_

            segmentation_ = np.argmax(segmentation_, axis=1)

            # save labels
            np.savetxt(output_filelist[count], segmentation_, fmt="%i")

            if args.ply:
                data_utils.save_ply_property(pts_dest, segmentation_, 6,
                                             output_ply_filelist[count])

            count += 1
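
test_multiple ensembles args.ntree random samplings of each shape by summing the raw per-point class scores before the final argmax. The voting step in isolation, as a hypothetical helper that is not part of the source:

import numpy as np

def soft_vote(score_list):
    # score_list holds one (num_points, num_classes) array per sampling of
    # the same shape; summing scores before argmax implements soft voting.
    return np.argmax(np.sum(score_list, axis=0), axis=1)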
Example #3
def test(args, flist_test):
    

    N_CLASSES = 13


    # create the network
    print("Creating network...")
    if args.nocolor:
        net = Net(input_channels=1, output_channels=N_CLASSES)
    else:    
        net = Net(input_channels=3, output_channels=N_CLASSES)
    net.load_state_dict(torch.load(os.path.join(args.savedir, "state_dict.pth")))
    net.cuda()
    net.eval()
    print("parameters", count_parameters(net))

    for filename in flist_test:
        print(filename)
        ds = PartDatasetTest(filename, args.rootdir,
                             block_size=args.blocksize,
                             min_pick_per_point=args.npick,
                             npoints=args.npoints,
                             test_step=args.test_step,
                             nocolor=args.nocolor)
        loader = torch.utils.data.DataLoader(ds, batch_size=args.batchsize,
                                             shuffle=False,
                                             num_workers=args.threads)

        xyzrgb = ds.xyzrgb[:,:3]
        scores = np.zeros((xyzrgb.shape[0], N_CLASSES))
        with torch.no_grad():
            t = tqdm(loader, ncols=80)
            for pts, features, indices in t:
                
                features = features.cuda()
                pts = pts.cuda()
                outputs = net(features, pts)

                outputs_np = outputs.cpu().numpy().reshape((-1, N_CLASSES))
                scores[indices.cpu().numpy().ravel()] += outputs_np

        mask = scores.sum(1) != 0
        scores = scores[mask]
        pts_src = xyzrgb[mask]

        # create the scores for all points
        scores = nearest_correspondance(pts_src, xyzrgb, scores, K=1)

        # compute the softmax (shift by the row max for numerical stability)
        scores = scores - scores.max(axis=1)[:, None]
        exp_scores = np.exp(scores)
        scores = exp_scores / exp_scores.sum(1)[:, None]
        scores = np.nan_to_num(scores)

        os.makedirs(os.path.join(args.savedir, filename), exist_ok=True)

        # save the predicted labels
        labels = scores.argmax(1)
        save_fname = os.path.join(args.savedir, filename, "pred.txt")
        np.savetxt(save_fname, labels, fmt='%d')

        if args.savepts:
            save_fname = os.path.join(args.savedir, filename, "pts.txt")
            xyzrgb = np.concatenate([xyzrgb, np.expand_dims(labels, 1)], axis=1)
            np.savetxt(save_fname, xyzrgb, fmt=['%.4f', '%.4f', '%.4f', '%d'])
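
The softmax above shifts each row by its maximum before exponentiating, which keeps np.exp within floating-point range without changing the result. The same computation as a reusable helper (illustrative):

import numpy as np

def stable_softmax(x):
    # Subtracting the row-wise max leaves the softmax output unchanged
    # but prevents overflow in np.exp.
    z = x - x.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)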
Example #4
def test(args):
    THREADS = 4
    BATCH_SIZE = args.batchsize
    USE_CUDA = True
    N_CLASSES = 50

    args.data_folder = os.path.join(args.rootdir, "test_data")

    # create the output folders
    output_folder = args.savedir + '_predictions'
    with open(args.category, 'r') as f:
        category_list = [(category, int(label_num))
                         for category, label_num in
                         (line.split() for line in f)]
    offset = 0
    category_range = dict()
    for category, category_label_seg_max in category_list:
        category_range[category] = (offset, offset + category_label_seg_max)
        offset = offset + category_label_seg_max
        folder = os.path.join(output_folder, category)
        os.makedirs(folder, exist_ok=True)

    input_filelist = []
    output_filelist = []
    output_ply_filelist = []
    for category in sorted(os.listdir(args.data_folder)):
        data_category_folder = os.path.join(args.data_folder, category)
        for filename in sorted(os.listdir(data_category_folder)):
            input_filelist.append(
                os.path.join(args.data_folder, category, filename))
            output_filelist.append(
                os.path.join(output_folder, category, filename[0:-3] + 'seg'))
            output_ply_filelist.append(
                os.path.join(output_folder + '_ply', category,
                             filename[0:-3] + 'ply'))

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data, label, data_num, label_test, _ = data_utils.load_seg(
        args.filelist_val)  # no segmentation labels

    net = Net(input_channels=1, output_channels=N_CLASSES)
    net.load_state_dict(
        torch.load(os.path.join(args.savedir, "state_dict.pth")))
    net.cuda()
    net.eval()

    ds = PartNormalDataset(data,
                           data_num,
                           label_test,
                           net.config,
                           npoints=args.npoints)
    test_loader = torch.utils.data.DataLoader(ds,
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              num_workers=THREADS,
                                              collate_fn=tree_collate)

    cm = np.zeros((N_CLASSES, N_CLASSES))
    t = tqdm(test_loader, ncols=120)
    with torch.no_grad():
        count = 0
        for pts, features, seg, tree in t:

            if USE_CUDA:
                features = features.cuda()
                pts = pts.cuda()
                for l_id in range(len(tree)):
                    tree[l_id]["points"] = tree[l_id]["points"].cuda()
                    tree[l_id]["indices"] = tree[l_id]["indices"].cuda()

            outputs = net(features, pts, tree)

            # save results
            for i in range(pts.size(0)):
                # pts_src
                pts_src = pts[i].cpu().numpy()

                # pts_dest
                point_num = data_num[count + i]
                pts_dest = data[count + i]
                pts_dest = pts_dest[:point_num]

                object_label = label[count + i]
                category = category_list[object_label][0]
                label_start, label_end = category_range[category]

                seg_ = outputs[i][:, label_start:label_end].cpu().numpy()
                seg_ = np.argmax(seg_, axis=1)
                seg_ = nearest_correspondance(pts_src, pts_dest, seg_)

                # save labels
                np.savetxt(output_filelist[count + i], seg_, fmt="%i")

                if args.ply:
                    data_utils.save_ply_property(
                        pts_dest, seg_, 6, output_ply_filelist[count + i])
            count += pts.size(0)

            output_np = np.argmax(outputs.cpu().detach().numpy(),
                                  axis=2).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(),
                                   output_np.ravel(),
                                   labels=list(range(N_CLASSES)))
            cm += cm_

            oa = "{:.3f}".format(metrics.stats_overall_accuracy(cm))
            aa = "{:.3f}".format(metrics.stats_accuracy_per_class(cm)[0])

            t.set_postfix(OA=oa, AA=aa)
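
nearest_correspondance (defined elsewhere in the repository) transfers per-point predictions from the sampled cloud back to every point of the original cloud. A plausible K=1 implementation with SciPy; the signature is inferred from the call sites here and may differ from the real one:

from scipy.spatial import cKDTree

def nearest_correspondance(pts_src, pts_dest, data_src, K=1):
    # For each destination point, copy the value attached to its nearest
    # source point (K=1 sketch; the repository version may aggregate K > 1
    # neighbors).
    _, indices = cKDTree(pts_src).query(pts_dest, k=1)
    return data_src[indices]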
Example #5
def train(args, flist_train, flist_test):
    N_CLASSES = 13


    # create the network
    print("Creating network...")
    if args.nocolor:
        net = Net(input_channels=1, output_channels=N_CLASSES)
    else:
        net = Net(input_channels=3, output_channels=N_CLASSES)
    net.cuda()
    print("parameters", count_parameters(net))


    print("Creating dataloader and optimizer...")
    ds = PartDatasetTrainVal(flist_train, args.rootdir,
                             training=True, block_size=args.blocksize,
                             npoints=args.npoints,
                             iteration_number=args.batchsize * args.iter,
                             nocolor=args.nocolor)
    train_loader = torch.utils.data.DataLoader(ds, batch_size=args.batchsize,
                                               shuffle=True,
                                               num_workers=args.threads)

    ds_val = PartDatasetTrainVal(flist_test, args.rootdir,
                                 training=False, block_size=args.blocksize,
                                 npoints=args.npoints, nocolor=args.nocolor)
    test_loader = torch.utils.data.DataLoader(ds_val, batch_size=args.batchsize,
                                              shuffle=False,
                                              num_workers=args.threads)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    print("done")

    # create the root folder
    print("Creating results folder")
    time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    root_folder = os.path.join(args.savedir, "s3dis_area{}_{}_nocolor{}_{}".format(args.area, args.npoints, args.nocolor, time_string))
    os.makedirs(root_folder, exist_ok=True)
    print("done at", root_folder)
    
    # create the log file
    logs = open(os.path.join(root_folder, "log.txt"), "w")

    # iterate over epochs
    for epoch in range(50):

        #######
        # training
        net.train()

        train_loss = 0
        cm = np.zeros((N_CLASSES, N_CLASSES))
        t = tqdm(train_loader, ncols=100, desc="Epoch {}".format(epoch))
        for pts, features, seg in t:

            features = features.cuda()
            pts = pts.cuda()
            seg = seg.cuda()
            
            optimizer.zero_grad()
            outputs = net(features, pts)
            loss = F.cross_entropy(outputs.view(-1, N_CLASSES), seg.view(-1))
            loss.backward()
            optimizer.step()

            output_np = np.argmax(outputs.cpu().detach().numpy(), axis=2).copy()
            target_np = seg.cpu().numpy().copy()

            cm_ = confusion_matrix(target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES)))
            cm += cm_

            oa = f"{metrics.stats_overall_accuracy(cm):.5f}"
            aa = f"{metrics.stats_accuracy_per_class(cm)[0]:.5f}"
            iou = f"{metrics.stats_iou_per_class(cm)[0]:.5f}"

            train_loss += loss.detach().cpu().item()

            t.set_postfix(OA=wblue(oa), AA=wblue(aa), IOU=wblue(iou), LOSS=wblue(f"{train_loss/cm.sum():.4e}"))


        #######
        # validation
        net.eval()
        cm_test = np.zeros((N_CLASSES, N_CLASSES))
        test_loss = 0
        t = tqdm(test_loader, ncols=80, desc="  Test epoch {}".format(epoch))
        with torch.no_grad():
            for pts, features, seg in t:
                
                features = features.cuda()
                pts = pts.cuda()
                seg = seg.cuda()
                
                
                outputs = net(features, pts)
                loss = F.cross_entropy(outputs.view(-1, N_CLASSES), seg.view(-1))

                output_np = np.argmax(outputs.cpu().detach().numpy(), axis=2).copy()
                target_np = seg.cpu().numpy().copy()

                cm_ = confusion_matrix(target_np.ravel(), output_np.ravel(), labels=list(range(N_CLASSES)))
                cm_test += cm_

                oa_val = f"{metrics.stats_overall_accuracy(cm_test):.5f}"
                aa_val = f"{metrics.stats_accuracy_per_class(cm_test)[0]:.5f}"
                iou_val = f"{metrics.stats_iou_per_class(cm_test)[0]:.5f}"

                test_loss += loss.detach().cpu().item()

                t.set_postfix(OA=wgreen(oa_val), AA=wgreen(aa_val), IOU=wgreen(iou_val), LOSS=wgreen(f"{test_loss/cm_test.sum():.4e}"))

        # save the model
        torch.save(net.state_dict(), os.path.join(root_folder, "state_dict.pth"))

        # write the logs
        logs.write(f"{epoch} {oa} {aa} {iou} {oa_val} {aa_val} {iou_val}\n")
        logs.flush()

    logs.close()
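
Each epoch appends one line to log.txt in the order epoch, train OA/AA/IoU, validation OA/AA/IoU, so the file loads directly with NumPy. An illustrative snippet for picking the best epoch by validation IoU:

import numpy as np

# Columns: epoch, train OA, train AA, train IoU, val OA, val AA, val IoU
log = np.loadtxt("log.txt", ndmin=2)
best_epoch = int(log[np.argmax(log[:, 6]), 0])
print("best epoch by validation IoU:", best_epoch)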