Example #1
def test_showpoints():
    file_paths = "/home/fcheng/Neuron/pointnet/eval_logs/syn/syn_FPS_32768_predLabel/"

    DATA_PATH = "/home/fcheng/Neuron/pointnet/data/syn_data_FPS_32768/"
    data_set = PointDataset(data_root=DATA_PATH, mode="train", augment=True)
    # for i in range(len(data_set)):  # optionally loop over every sample
    i = 0  # visualize only the first sample here
    name = data_set.dataNames[i]
    point_sets, _ = data_set[i]

    labels = np.load(os.path.join(file_paths, name + ".npy"))  # predicted per-point labels
    uni_labels = np.unique(labels)
    
    colors = ncolors(len(uni_labels))  # one distinct color per unique label
    showpoints(point_sets, c_gt=colors[labels], normalizecolor=False,
               background=(255, 255, 255), ballradius=2)
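
For a quick look at the viewer itself, without the Neuron dataset, a random cloud is enough. A minimal sketch, assuming the pointnet.pytorch-style utils/show3d_balls.py is importable:

import numpy as np
from utils.show3d_balls import showpoints

pts = np.random.rand(2048, 3).astype(np.float32) - 0.5  # random cloud around the origin
showpoints(pts, ballradius=3)  # press 'q' in the viewer window to quit
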
Example #2
File: test.py Project: nschor/CompoNet
def show_3d_point_clouds(shapes, is_missing_part):
    colors = np.zeros_like(shapes)
    # assign one fixed color per part (points are grouped part-by-part)
    for p in range(NUM_PARTS):
        colors[NUM_POINTS * p:NUM_POINTS * (p + 1), :] = COLORS[p]

    # fix orientation
    shapes[:, 1] *= -1
    shapes = shapes[:, [1, 2, 0]]

    if is_missing_part:
        # drop the last part (and its colors) to show the partial shape
        shapes = shapes[:NUM_POINTS * (NUM_PARTS - 1)]
        colors = colors[:NUM_POINTS * (NUM_PARTS - 1)]
    show3d_balls.showpoints(shapes,
                            c_gt=colors,
                            ballradius=8,
                            normalizecolor=False,
                            background=[255, 255, 255])
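
A hypothetical call for reference; NUM_PARTS, NUM_POINTS, and COLORS are module constants in the CompoNet script, so the values below are illustrative assumptions only:

import numpy as np

NUM_PARTS = 4     # assumed part count
NUM_POINTS = 512  # assumed points per part
COLORS = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0]])

shapes = np.random.rand(NUM_PARTS * NUM_POINTS, 3)
show_3d_point_clouds(shapes, is_missing_part=False)
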
Example #3
def main():
    parser = argparse.ArgumentParser(description='AutoEncoder ShapeNet')
    parser.add_argument('--dropout_ratio', type=float, default=0)
    parser.add_argument('--trans', type=strtobool, default='true')
    parser.add_argument('--use_bn', type=strtobool, default='true')
    parser.add_argument('--residual', type=strtobool, default='false')
    parser.add_argument('--out_dim', type=int, default=3)
    parser.add_argument('--in_dim', type=int, default=3)
    parser.add_argument('--middle_dim', type=int, default=64)
    parser.add_argument('--load_file',
                        '-lf',
                        type=str,
                        default='result/model.npz')
    parser.add_argument('--class_choice', type=str, default='Chair')
    parser.add_argument('--extension', type=str, default='default')
    parser.add_argument('--num_point', type=int, default=1024)
    args = parser.parse_args()

    dropout_ratio = args.dropout_ratio
    trans = args.trans
    use_bn = args.use_bn
    residual = args.residual
    out_dim = args.out_dim
    in_dim = args.in_dim
    middle_dim = args.middle_dim
    class_choice = args.class_choice
    load_file = args.load_file
    num_point = args.num_point

    trans_lam1 = 0.001
    trans_lam2 = 0.001

    print('Load PointNet-AutoEncoder model... load_file={}'.format(load_file))
    model = ae.PointNetAE(out_dim=out_dim,
                          in_dim=in_dim,
                          middle_dim=middle_dim,
                          dropout_ratio=dropout_ratio,
                          use_bn=use_bn,
                          trans=trans,
                          trans_lam1=trans_lam1,
                          trans_lam2=trans_lam2,
                          residual=residual,
                          output_points=num_point)
    serializers.load_npz(load_file, model)

    d = dataset.ChainerPointCloudDatasetDefault(split="test",
                                                class_choice=[class_choice],
                                                num_point=num_point)

    x, _ = d.get_example(0)
    x = chainer.Variable(np.array([x]))
    with chainer.using_config('train', False), chainer.using_config(
            'enable_backprop', False):
        y, t1, t2 = model.calc(x)
    y = y.array[0]  # assumed shape: (out_dim, num_point, 1)
    # gather per-point xyz from the channel-first output
    point_data = []
    for n in range(len(y[0])):
        point_data.append([y[0][n][0], y[1][n][0], y[2][n][0]])
    point_data = np.array(point_data)

    from utils import show3d_balls
    show3d_balls.showpoints(d.get_data(0), ballradius=8)  # input cloud
    show3d_balls.showpoints(point_data, ballradius=8)     # reconstruction
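
Assuming y has shape (out_dim, num_point, 1) after y.array[0], the per-point loop above collapses to a single slice-and-transpose:

point_data = y[:, :, 0].T  # (num_point, 3); equivalent to the loop above
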
Example #4
            else:
                # pad up to self.npoints by re-sampling existing points
                extra_point_set, choice = data_utils.choose_points(
                    point_set, self.npoints - point_set_length)
                point_set = np.append(point_set, extra_point_set, axis=0)
                norm_part_point_set = np.append(norm_part_point_set,
                                                norm_part_point_set[choice],
                                                axis=0)
            point_sets.append((point_set, norm_part_point_set, is_full))

        return point_sets

    def __len__(self):
        return len(self.datapath)

    def get_number_of_parts(self):
        return self.num_parts


if __name__ == '__main__':
    from utils import show3d_balls

    d = PartDatasetPCN(root=os.path.join(
        BASE_DIR, '../data/shapenetcore_partanno_segmentation_benchmark_v0'),
                       class_choice='Chair',
                       split='test')
    i = 27
    point_sets = d[i]
    for p in range(d.get_number_of_parts()):
        ps, _, _ = point_sets[p]
        show3d_balls.showpoints(ps, ballradius=8)
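
The else branch above pads a part to a fixed size by appending re-sampled copies of existing points. choose_points is project code; pad_points below is a hypothetical standalone sketch of the same idea:

import numpy as np

def pad_points(point_set, npoints):
    """Pad point_set to npoints rows by duplicating randomly chosen points."""
    choice = np.random.choice(len(point_set), npoints - len(point_set), replace=True)
    return np.append(point_set, point_set[choice], axis=0), choice
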
Example #5
def our_main():
    from utils.show3d_balls import showpoints
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--batchSize', type=int, default=32, help='input batch size')
    parser.add_argument(
        '--num_points', type=int, default=2000, help='number of input points')
    parser.add_argument(
        '--workers', type=int, help='number of data loading workers', default=4)
    parser.add_argument(
        '--nepoch', type=int, default=250, help='number of epochs to train for')
    parser.add_argument('--outf', type=str, default='cls', help='output folder')
    parser.add_argument('--model', type=str, default='', help='model path')
    parser.add_argument('--dataset', type=str, required=True, help="dataset path")
    parser.add_argument('--dataset_type', type=str, default='shapenet', help="dataset type shapenet|modelnet40")
    parser.add_argument('--feature_transform', action='store_true', help="use feature transform")

    opt = parser.parse_args()
    print(opt)

    blue = lambda x: '\033[94m' + x + '\033[0m'

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    if opt.dataset_type == 'shapenet':
        dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            npoints=opt.num_points)

        test_dataset = ShapeNetDataset(
            root=opt.dataset,
            classification=True,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    elif opt.dataset_type == 'modelnet40':
        dataset = ModelNetDataset(
            root=opt.dataset,
            npoints=opt.num_points,
            split='trainval')

        test_dataset = ModelNetDataset(
            root=opt.dataset,
            split='test',
            npoints=opt.num_points,
            data_augmentation=False)
    else:
        exit('wrong dataset type')


    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=opt.batchSize,
        shuffle=True,
        num_workers=int(opt.workers))

    testdataloader = torch.utils.data.DataLoader(
            test_dataset,
            batch_size=opt.batchSize,
            shuffle=True,
            num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))
    num_classes = len(dataset.classes)
    print('classes', num_classes)

    try:
        os.makedirs(opt.outf)
    except OSError:
        pass

    classifier = PointNetCls(k=num_classes, feature_transform=opt.feature_transform)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))


    optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    classifier.cuda()

    num_batch = len(dataset) // opt.batchSize

    ## python train_classification.py --dataset ../dataset --nepoch=4 --dataset_type  shapenet
    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            target = target[:, 0]
            showpoints(points[0].numpy())  # blocking viewer: training pauses until the window is closed
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            classifier = classifier.train()
            pred, trans, trans_feat = classifier(points)
            loss = F.nll_loss(pred, target)
            if opt.feature_transform:
                loss += feature_transform_regularizer(trans_feat) * 0.001
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), correct.item() / float(opt.batchSize)))

            if i % 10 == 0:
                j, data = next(enumerate(testdataloader, 0))  # a fresh (shuffled) first test batch
                points, target = data
                target = target[:, 0]
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                classifier = classifier.eval()
                pred, _, _ = classifier(points)
                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch, blue('test'), loss.item(), correct.item()/float(opt.batchSize)))

        scheduler.step()  # step the LR schedule once per epoch, after the optimizer updates
        torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' % (opt.outf, epoch))

    total_correct = 0
    total_testset = 0
    for i, data in tqdm(enumerate(testdataloader, 0)):
        points, target = data
        target = target[:, 0]
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        classifier = classifier.eval()
        pred, _, _ = classifier(points)
        pred_choice = pred.data.max(1)[1]
        correct = pred_choice.eq(target.data).cpu().sum()
        total_correct += correct.item()
        total_testset += points.size()[0]

    print("final accuracy {}".format(total_correct / float(total_testset)))
Example #6
def fun(xyz1, xyz2, pts2):
    with tf.device('/cpu:0'):
        points = tf.constant(np.expand_dims(pts2, 0))
        xyz1 = tf.constant(np.expand_dims(xyz1, 0))
        xyz2 = tf.constant(np.expand_dims(xyz2, 0))
        dist, idx = three_nn(xyz1, xyz2)
        # weight = tf.ones_like(dist) / 3.0  # (uniform-average alternative)
        dist = tf.maximum(dist, 1e-10)  # guard against division by zero
        # inverse-distance weights over the 3 nearest neighbors, normalized to sum to 1
        norm = tf.reduce_sum((1.0 / dist), axis=2, keep_dims=True)
        norm = tf.tile(norm, [1, 1, 3])
        weight = (1.0 / dist) / norm
        interpolated_points = three_interpolate(points, idx, weight)
    with tf.Session('') as sess:
        tmp, pts1, d, w = sess.run([xyz1, interpolated_points, dist, weight])
        pts1 = pts1.squeeze()
    return pts1


pts1 = fun(xyz1, xyz2, pts2)

# stack the 100 interpolated points and the 4 source points for joint display
all_pts = np.zeros((104, 3))
all_pts[0:100, :] = pts1
all_pts[100:, :] = pts2
all_xyz = np.zeros((104, 3))
all_xyz[0:100, :] = xyz1
all_xyz[100:, :] = xyz2
showpoints(xyz2, pts2, ballradius=8)        # the 4 source points
showpoints(xyz1, pts1, ballradius=8)        # the 100 interpolated points
showpoints(all_xyz, all_pts, ballradius=8)  # both sets together
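
This snippet assumes xyz1, xyz2, and pts2 are defined earlier in the file; from the indexing above, their shapes must be (100, 3), (4, 3), and (4, 3). Each dense point in xyz1 receives the inverse-distance-weighted average of the values at its three nearest neighbors in xyz2. A hypothetical setup with random data:

import numpy as np

xyz1 = np.random.rand(100, 3).astype(np.float32)  # dense coordinates to interpolate onto
xyz2 = np.random.rand(4, 3).astype(np.float32)    # sparse source coordinates
pts2 = np.random.rand(4, 3).astype(np.float32)    # values (used here as colors) at xyz2
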
Example #7
    parser = argparse.ArgumentParser(description="arg parser")
    parser.add_argument("--idx",
                        type=int,
                        default=0,
                        required=True,
                        help="batch size for training")  # 68
    args = parser.parse_args()

    DATA_PATH = "/home/fcheng/Neuron/pointnet/data/syn_data_FPS_32768/"
    train_set = PointDataset(data_root=DATA_PATH, mode="train", augment=True)

    idx = args.idx
    point_sets, ins = train_set[idx]
    name = train_set.dataNames[idx]

    # remap raw instance labels to a dense 0..K-1 range for color indexing
    uni_l = np.unique(ins)
    ordered_labels = np.zeros_like(ins)
    for cnt, l in enumerate(uni_l):
        ordered_labels[ins == l] = cnt

    colors = ncolors(len(uni_l))

    print(name)
    print(np.unique(ordered_labels))
    showpoints(point_sets,
               c_gt=colors[ordered_labels],
               background=(255, 255, 255),
               normalizecolor=False,
               ballradius=2)
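
The remapping loop above is exactly what np.unique with return_inverse computes, so for 1-D labels it reduces to one call:

uni_l, ordered_labels = np.unique(ins, return_inverse=True)  # dense 0..K-1 labels
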
Example #8
        # look up the part label of the mesh face each point was sampled from
        ref_indices = tf.concat(axis=2, values=[B, correspondingfaces])
        ref_tri_part = tf.expand_dims(tf.constant(lab), 0)
        ref_part_mask = tf.gather_nd(ref_tri_part, ref_indices, name=None)

        d = PartMeshDataset('/mnt/ilcompf8d0/user/weiyuewa/dataset/shapenet/part_mesh/Models/filelists/chair.lst',
                            '/mnt/ilcompf8d0/user/weiyuewa/dataset/shapenet/part_mesh/Models/filelists/chair_cat.lst')


        points_val, ref_part_mask_val = sess.run([dst_points, ref_part_mask])

        points_val = np.squeeze(points_val)
        ref_part_mask_val = np.squeeze(ref_part_mask_val)
        # map each point's part label to its display color
        c_gt = np.zeros([len(ref_part_mask_val), 3])
        for i in range(len(ref_part_mask_val)):
            c_gt[i] = color_dict[ref_part_mask_val[i]]
        show3d_balls.showpoints(points_val, c_gt=c_gt, ballradius=2)

    # d = PartDataset(root=os.path.join(BASE_DIR, 'data/shapenetcore_partanno_segmentation_benchmark_v0'),
    #                 class_choice=['Chair'], split='trainval')
    # print(len(d))
    # import time
    #
    # tic = time.time()
    # i = 100
    # ps, seg = d[i]
    # print(np.max(seg), np.min(seg))
    # print(time.time() - tic)
    # print(ps.shape, type(ps), seg.shape, type(seg))
    # sys.path.append('utils')
    # import show3d_balls
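
The per-point color lookup in this example can also be written as a single comprehension over the labels:

c_gt = np.array([color_dict[l] for l in ref_part_mask_val])
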