Example #1
def dev(opt):
	
	dev_dataset = PartDataset(root = '/mnt/lustre/niuyazhe/data/BDCI/dev_set/', classification = False, train = False)
	dev_dataloader = torch.utils.data.DataLoader(dev_dataset, batch_size=opt.batchsize,
												  shuffle=False, num_workers=int(opt.workers))

	num_classes = dev_dataset.num_seg_classes
	num_points = dev_dataset.num_points
	print(len(dev_dataset))
	print('classes', num_classes)
	print('points', num_points)

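	# ANSI escape sequence: wraps a string so it prints blue in the terminal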
	blue = lambda x:'\033[94m' + x + '\033[0m'
	net = PointNetSeg(k = num_classes)
	net = nn.DataParallel(net.cuda())
	if opt.dev_model != '':
		net.load_state_dict(torch.load(opt.dev_model))
	net.eval()
	num_batch = len(dev_dataset) // opt.batchsize
	for i, data in enumerate(dev_dataloader, 0):
		points, target = data
		target = target.long()
		points, target = Variable(points).cuda(), Variable(target).cuda()
		output, transform = net(points)

		output_choice = output.data.max(2)[1]  # argmax over the class dimension: predicted label per point
		correct = output_choice.eq(target.data).cpu().sum()
		print('[%s: %d/%d] %s accuracy: %f' % ("dev", i, num_batch, blue('test'), correct.item() / float(opt.batchsize * num_points)))
Example #2

def get_damaged_points(pts):
    # Reconstructed header; the original snippet starts mid-function.
    # Drops every point within a random radius of a randomly chosen point.
    pts_kept = []
    circle_radius = random.randint(1, 3)
    i = np.random.choice(len(pts))
    random_pt = pts[i]
    new_pts = []
    for pt_idx, point in enumerate(pts):
        if np.linalg.norm(point - random_pt) > circle_radius:
            new_pts.append(point)
            pts_kept.append(pt_idx)
    return np.array(new_pts), pts_kept


dataset = PartDataset(root='demo_data',
                      task='multi_task',
                      npoints=opt.num_points,
                      mode='test',
                      min_pts=0,
                      num_seg_class=5,
                      load_in_memory=False)

cls_class_to_idx = dataset.classes
cls_idx_to_class = {v: k for k, v in dataset.classes.items()}

idx = opt.idx

print("model %d/%d" % (idx, len(dataset)))

point, cls, seg = dataset[idx]
damaged_point, pt_idx = get_damaged_points(point.numpy())
damaged_point = torch.from_numpy(damaged_point)
damaged_seg = seg[pt_idx]
Example #3
parser.add_argument('--outf', type=str, default='cls',  help='output folder')
parser.add_argument('--model', type=str, default = '',  help='model path')
parser.add_argument('--n_views', type=int, default = 13,  help='view numbers')
parser.add_argument('--lr', type=float, default = 0.01,  help='learning rate')
parser.add_argument('--momentum', type=float, default = 0.9,  help='momentum')
opt = parser.parse_args()
print (opt)

blue = lambda x:'\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
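# note: torch.cuda.manual_seed_all(opt.manualSeed) would additionally seed the GPU RNG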

dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True, npoints = opt.num_points)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

test_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', classification = True, train = False, npoints = opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass
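# note: on Python 3.2+, os.makedirs(opt.outf, exist_ok=True) achieves the same without the try/except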
Example #4
parser.add_argument('--num_points', type=int, default = 2500,  help='number of points')
parser.add_argument('--dataset', type=str, required=True, help='dataset root')
parser.add_argument('--class_choice', type=str, required=True, help='class choice')
parser.add_argument('--noise_vec_size', type=int, default=100, help='size of noise vector')

opt = parser.parse_args()
print (opt)

blue = lambda x:'\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root = opt.dataset, class_choice = [opt.class_choice], classification = True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))


cudnn.benchmark = True

num_classes = len(dataset.classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass


classifier = PointNetCls(k = 2, num_points = opt.num_points)
Example #5
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--nepoch', type=int, default=30, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg',  help='output folder')
parser.add_argument('--model', type=str, default= '',  help='model path')


opt = parser.parse_args()
print (opt)

opt.manualSeed = random.randint(1, 2500) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

num_points = 2700
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', npoints=num_points, classification=False, class_choice=['Chair'])
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

test_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', npoints=num_points, classification=False, class_choice=['Chair'], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = 10
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass
Example #6
                    help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='gan', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')

opt = parser.parse_args()
print(opt)

blue = lambda x: '\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                      class_choice=['Chair'],
                      classification=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(
    root='shapenetcore_partanno_segmentation_benchmark_v0',
    class_choice=['Chair'],
    classification=True,
    train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
Example #7
                    default=25,
                    help='number of epochs to train for')
parser.add_argument('--nowepoch', type=int, default=0, help='current epoch')
parser.add_argument('--outf', type=str, default='seg', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate')

opt = parser.parse_args()
print(opt)

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root='..//Thingi10K//sdf_polar', train=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(root='..//Thingi10K//sdf_polar', train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
try:
    os.makedirs(opt.outf)
except OSError:
    pass
Example #8
    
    R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
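    # (R above rotates about the z-axis; ct and st are the cosine and sine of an angle drawn earlier in the truncated function)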
    
    # Construct the rotation matrix  ( V Transpose(V) - I ) R.
    
    M = (np.outer(V, V) - np.eye(3)).dot(R)
    return M
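
This fragment matches the tail of the widely used random-rotation recipe; a self-contained sketch, assuming Arvo's fast-random-rotation construction (the function name and the `deflection` parameter are assumptions, not part of the original):

import numpy as np

def rand_rotation_matrix(deflection=1.0):
    # Draw a random z-rotation angle plus a random pole direction and
    # magnitude, then combine them as ( V Transpose(V) - I ) R.
    theta, phi, z = np.random.uniform(size=3)
    theta *= 2.0 * deflection * np.pi  # rotation about the z-axis
    phi *= 2.0 * np.pi                 # direction of pole deflection
    z *= 2.0 * deflection              # magnitude of pole deflection
    r = np.sqrt(z)
    V = np.array((np.sin(phi) * r, np.cos(phi) * r, np.sqrt(2.0 - z)))
    ct, st = np.cos(theta), np.sin(theta)
    R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
    return (np.outer(V, V) - np.eye(3)).dot(R)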

blue = lambda x:'\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root = opt.dataset, class_choice = [opt.class_choice], canonicalize=True, npoints = opt.num_points, train=not opt.test)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

opt.outf += "_" + opt.class_choice

# Create summary logger
writer = SummaryWriter("runs/" + opt.outf + "_" + str(time.time()))

cudnn.benchmark = True

num_classes = len(dataset.classes)
print('classes', num_classes)


try:
    os.makedirs(opt.outf)
except OSError:
    pass
Example #9
def validation_data_loader():
    #_fname = '%s/data/x_y1_y2_idx_200_withoutnoise.h5'%root
    _fname = '%s/data/x_y1_y2_idx_33_withoutnoise.h5'%root
    dataset = PartDataset(_fname, 1, conf.batch_size, train=False)
    return DataLoader(dataset, batch_size=conf.batch_size,
                     shuffle=False, num_workers=conf.threads)
Example #10
def training_data_loader():
    #_fname = '%s/data/x_y1_y2_idx_2017060708_normalise.h5'%root  
    _fname = '%s/data/x_y1_y2_idx_33_withoutnoise.h5'%root  
    dataset = PartDataset(_fname, 300, conf.batch_size, train=True)  # 3050
    return DataLoader(dataset, batch_size=conf.batch_size,
                     shuffle=False, num_workers=conf.threads)
Example #11
def main():
    global args
    opt = parser.parse_args()
    print(opt)

    opt.manualSeed = random.randint(1, 10000)  # fix seed
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset = PartDataset(
        root='shapenetcore_partanno_segmentation_benchmark_v0',
        pic2point=True,
        npoints=opt.num_points)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    print("number of training data:" + str(len(dataset)))
    test_dataset = PartDataset(
        root='shapenetcore_partanno_segmentation_benchmark_v0',
        pic2point=True,
        train=False,
        npoints=opt.num_points)
    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchSize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))
    print("number of training data:" + str(len(test_dataset)))

    # create model
    print("model building...")
    model = pic2points(num_points=opt.num_points)
    model.cuda()

    # load pre-existing weights
    if opt.model != '':
        model.load_state_dict(torch.load(opt.model))

    optimizer = torch.optim.Adadelta(model.parameters(), weight_decay=1e-4)
    num_batch = len(dataset) / opt.batchSize
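    # note: true division; len(dataset) // opt.batchSize would give the whole-batch count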

    print('training mode ------------------')
    for epoch in range(opt.nepoch):
        print("epoch:" + str(epoch))
        for i, data in enumerate(dataloader, 0):
            im, points = data
            im, points = Variable(im), Variable(points)
            im, points = im.cuda(), points.cuda()
            pred = model(im)
            loss = batch_NN_loss(pred, points).cuda()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if i % 50 == 0:
                print("training loss is:" + str(loss.item()))

        loss_test = 0
        for i, data in enumerate(testdataloader, 0):
            im_test, points_test = data
            im_test, points_test = Variable(im_test), Variable(points_test)
            im_test, points_test = im_test.cuda(), points_test.cuda()
            pred_test = model(im_test)
            loss_test = batch_NN_loss(pred_test, points_test).cuda()
        print("Testing loss is:" + str(loss_test.item()))
Example #12
opt = parser.parse_args()
print(opt)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('using device: {}'.format(device))

blue = lambda x: '\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root='data/roofn3d_data_multitask_v1',
                      task='classification',
                      npoints=opt.num_points,
                      load_in_memory=True)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(root='data/roofn3d_data_multitask_v1',
                           task='classification',
                           train=False,
                           npoints=opt.num_points,
                           load_in_memory=True)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
Example #13
    
    R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))
    
    # Construct the rotation matrix  ( V Transpose(V) - I ) R.
    
    M = (np.outer(V, V) - np.eye(3)).dot(R)
    return M

blue = lambda x:'\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root = opt.dataset, class_choice = [opt.class_choice], classification = True, npoints = opt.num_points, train=not opt.test)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

opt.outf += "_" + opt.class_choice

# Create summary logger
writer = SummaryWriter("runs/" + opt.outf + "_" + str(time.time()))

num_classes = len(dataset.classes)
print('classes', num_classes)

try:
    os.makedirs(opt.outf)
except OSError:
    pass
Example #14
from datasets import PartDataset
from pointnet import PointGen, PointGenC, PointGenComp
import torch.nn.functional as F
import matplotlib.pyplot as plt

#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default='', help='model path')

opt = parser.parse_args()
print(opt)

dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                      class_choice=['Chair'],
                      shape_comp=True)

gen = PointGenComp()
gen.load_state_dict(torch.load(opt.model))

ld = len(dataset)

idx = np.random.randint(ld)

print(ld, idx)

_, part = dataset[idx]

sim_noise = Variable(torch.randn(2, 1024))
sim_noises = Variable(torch.zeros(30, 1024))
Example #15
File: IOU.py  Project: PaParaZz1/PointNet
def dev(opt):

    dev_dataset = PartDataset(root='/mnt/lustre/niuyazhe/', train=False)
    dev_dataloader = torch.utils.data.DataLoader(dev_dataset,
                                                 batch_size=opt.batchsize,
                                                 shuffle=False,
                                                 num_workers=int(opt.workers))

    num_classes = 8  #dev_dataset.num_seg_classes
    batch_size = opt.batchsize
    num_points = 52480
    print('classes', num_classes)

    net = PointNetSeg(num_class=num_classes)
    net = nn.DataParallel(net.cuda())
    if opt.dev_model != '':
        net.load_state_dict(torch.load(opt.dev_model))
        print('path{}'.format(opt.dev_model))
    else:
        raise RuntimeError("no pretrained model")
    net.eval()
    global_IOU = 0.
    count = 0
    for i, data in enumerate(dev_dataloader):
        points, target = data
        target = target.long()
        points = points.permute(0, 2, 1)
        target = target.cuda()
        points = Variable(points).cuda()
        output, _ = net(points)
        print("inference over")

        output_choice = output.data.max(2)[1]
        correct = torch.eq(output_choice, target).sum().item()
        print('accuracy: %f' % (correct / float(batch_size * num_points)))
        # per-class IoU over segmentation classes 1..7; classes absent from
        # both the prediction and the target are skipped
        local_IOU = 0.
        val = 7
        for cls_id in range(1, 8):
            o1 = torch.eq(output_choice, cls_id)
            t1 = torch.eq(target, cls_id)
            up = (o1 & t1).sum()
            div = (o1 | t1).sum()
            if div == 0:
                val -= 1
            else:
                # cast to float: integer tensor division truncates to zero
                local_IOU += up.item() / float(div.item())
        local_IOU /= val
        global_IOU += local_IOU
        count += 1
        print("count:{}".format(count))
        print("local IOU:{}".format(local_IOU))
        if count == 1000:
            break
    print("global IOU:%.8f" % (global_IOU / count))
Example #16

# General parameters
NUM_POINTS = 10000
MODEL_PATH = './cls_model.pth'
DATA_FOLDER = './shapenetcore_partanno_segmentation_benchmark_v0'

# download dataset and pre-trained model
download.download_contents()


# Create dataset object
test_dataset_seg = PartDataset(root=DATA_FOLDER,
                               train=False,
                               classification=False,
                               npoints=NUM_POINTS)

# Problem ontology
classes_dict = {
    'Airplane': 0,
    'Bag': 1,
    'Cap': 2,
    'Car': 3,
    'Chair': 4,
    'Earphone': 5,
    'Guitar': 6,
    'Knife': 7,
    'Lamp': 8,
    'Laptop': 9,
    'Motorbike': 10,
Example #17
parser = argparse.ArgumentParser()

parser.add_argument('--model',
                    type=str,
                    default='./seg/seg_model_29_0.810.pth',
                    help='model path')
parser.add_argument('--cat', type=str, default='tools', help='category name')
parser.add_argument('--idx', type=int, default=0, help='data index')

opt = parser.parse_args()
print(opt)

num_classes = 10
num_points = 2700
d = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                npoints=num_points,
                class_choice=opt.cat,
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point_np, seg = d[idx]
point = torch.from_numpy(point_np)
point_np[:, 2] *= -1

cmap = plt.cm.get_cmap("hsv", 10)  # 10 colors, matching range(10) below
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg - 1, :]
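# note: part labels are 1-based, hence the "seg - 1" when indexing the colormap above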

classifier = PointNetDenseCls(num_points=num_points, k=num_classes)
Example #18
print(opt)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('using device: {}'.format(device))

model_dir = opt.outf

blue = lambda x: '\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root=os.path.join(opt.input_path, 'train'),
                      task='multi_task',
                      npoints=opt.num_points,
                      min_pts=0,
                      load_in_memory=True,
                      num_seg_class=5)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

val_dataset = PartDataset(root=os.path.join(opt.input_path, 'val'),
                          task='multi_task',
                          mode='val',
                          npoints=opt.num_points,
                          min_pts=0,
                          load_in_memory=True,
                          num_seg_class=5)
valdataloader = torch.utils.data.DataLoader(val_dataset,
Example #19
    opt.knn = 16  # yw

    np.random.seed(100)
    pt = np.random.rand(250, 3)
    # fig = plt.figure()
    # ax = fig.add_subplot(111,projection='3d')

    #ax.scatter(pt[:,0],pt[:,1],pt[:,2])
    #plt.show()

    class_choice = 'Airplane'
    pt_root = 'shapenetcore_partanno_segmentation_benchmark_v0'
    npoints = 2500

    shapenet_dataset = PartDataset(root=pt_root,
                                   class_choice=class_choice,
                                   classification=True,
                                   train=True)
    print('len(shapenet_dataset) :', len(shapenet_dataset))
    dataloader = torch.utils.data.DataLoader(shapenet_dataset,
                                             batch_size=1,
                                             shuffle=False)

    li = list(enumerate(dataloader))
    print(len(li))

    # ps,cls = shapenet_dataset[0]
    # print('ps.size:',ps.size())
    # print('ps.type:',ps.type())
    # print('cls.size',cls.size())
    # print('cls.type',cls.type())
Example #20
from show3d_balls import *

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--idx', type=int, default=0, help='model index')
parser.add_argument('--className', type=str, default='Chair', help='class name')
parser.add_argument('--radius', type=int, default=2, help='radius of ball for visualization')
parser.add_argument('--cmap', type=str, default='hsv', help='valid matplotlib cmap')
parser.add_argument('--npoints', type=int, default=2500, help='points to sample')

opt = parser.parse_args()

idx = opt.idx

d = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0', class_choice=[opt.className], train=False, npoints=opt.npoints)

print('model %d/%d' % (idx, len(d)))

num_class = d.num_classes
print('number of classes', num_class)

point, seg = d[idx]

point_np = point.numpy()

cmap = plt.cm.get_cmap(opt.cmap, 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy()-1, :]

classifier = PointNetDenseCls(k=shapenet_labels[opt.className])
Example #21
                    default=200,
                    help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')

numpoints = 2500
opt = parser.parse_args()
print(opt)
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cate = 'Guitar'
train_dataset = PartDataset(root='test250',
                            npoints=numpoints,
                            class_choice=cate)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=opt.batchSize,
                                           shuffle=True,
                                           num_workers=int(opt.workers))

test_dataset = PartDataset(root='test250',
                           npoints=numpoints,
                           class_choice=cate,
                           train=False)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=int(opt.workers))
Example #22
                    default=4)
parser.add_argument('--num_points',
                    type=int,
                    default=1000,
                    help='input batch size')
parser.add_argument('--min_points',
                    type=int,
                    default=0,
                    help='smallest point cloud')

opt = parser.parse_args()

test_dataset = PartDataset(root=os.path.join(opt.input_path, 'test'),
                           task='classification',
                           mode='test',
                           npoints=opt.num_points,
                           min_pts=0,
                           load_in_memory=True,
                           num_seg_class=5)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=False,
                                             num_workers=opt.workers)
num_batch = len(test_dataset) / opt.batchSize

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

classifier = PointNetCls(k=len(test_dataset.classes)).to(device)

classifier.load_state_dict(torch.load(opt.model))
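# note: if the weights were saved on GPU and this host is CPU-only,
# torch.load(opt.model, map_location=device) avoids a device-mismatch error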
classifier.eval()
Example #23
                    help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')

opt = parser.parse_args()
print(opt)

blue = lambda x: '\033[94m' + x + '\033[0m'

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root='/data2/leo/bop/bop_renderer/samples/cls/',
                      classification=True,
                      npoints=opt.num_points)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(root='/data2/leo/bop/bop_renderer/samples/cls/',
                           classification=True,
                           train=False,
                           npoints=opt.num_points)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
Example #24
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='seg',  help='output folder')
parser.add_argument('--model', type=str, default = '',  help='model path')


opt = parser.parse_args()
print (opt)

#opt.manualSeed = random.randint(1, 10000) # fix seed
#print("Random Seed: ", opt.manualSeed)
#random.seed(opt.manualSeed)
#torch.manual_seed(opt.manualSeed)

dataset = PartDataset( classification = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True)#, num_workers=int(opt.workers)

test_dataset = PartDataset( classification = False, class_choice = ['Chair'], train = False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass
Example #25
from pointnet import PointNetDenseCls
import torch.nn.functional as F
import matplotlib.pyplot as plt

#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--idx', type=int, default=0, help='model index')

opt = parser.parse_args()
print(opt)

d = PartDataset(root='/media/cba62/Elements/PointNet_Data',
                class_choice=['Chair'],
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]
Example #26
                    help='Classification Category to train segmentation on')
parser.add_argument('--scoresFolder',
                    type=str,
                    default='scores/SixMolecules',
                    help='Folder for scores')

opt = parser.parse_args()
print(opt)

opt.manualSeed = random.randint(1, 10000)  # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

dataset = PartDataset(root='T2Dataset',
                      classification=False,
                      class_choice=opt.classification)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

test_dataset = PartDataset(root='T2Dataset',
                           classification=False,
                           class_choice=opt.classification,
                           train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
Example #27
        x2 = F.relu(self.fc5(x))
        x2 = F.relu(self.fc6(x2))
        x2 = F.relu(self.fc7(x2))
        x2 = self.fc8(x2)

        offset = x2.view(batchsize, 4, 3)

        return code, offset


gen = PointCodeGen()
gen.load_state_dict(torch.load(opt.model + 'G_40.pth'))

dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                      class_choice=['Chair'],
                      parts_also=True,
                      npoints=2048)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=64,
                                         shuffle=True,
                                         num_workers=1)

ae.cuda()
gen.cuda()

bs = 32

sim_noise = Variable(torch.randn(bs, 100)).cuda()
fake, pos = gen(sim_noise)
fake = fake.contiguous()
pos = pos.contiguous()
Example #28
from datasets import PartDataset
import torch

dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                      classification=True,
                      npoints=2500)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=1,
                                         shuffle=True,
                                         num_workers=0)

for i, data in enumerate(dataloader, 0):
    points, target = data
    print(points, points.shape, target)
    if i == 4:
        break
Example #29
from pointnet import PointNetDenseCls
import torch.nn.functional as F
import matplotlib.pyplot as plt

#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--idx', type=int, default=0, help='model index')

opt = parser.parse_args()
print(opt)

d = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                class_choice=['Chair'],
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]
Example #30
import torch.nn.functional as F
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D


#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))

parser = argparse.ArgumentParser()

parser.add_argument('--model', type=str, default = '',  help='model path')


opt = parser.parse_args()
print (opt)

test_dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0' , train = False, classification = True)

testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle = False)


classifier = PointNetCls(k = len(test_dataset.classes))
classifier.cuda()
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()
total_correct=0
total_count=0
for i, data in enumerate(testdataloader, 0):
	points, target = data
	points_input = points.cpu().numpy()
	print(points_input.shape)
	# points_input = points_input[:,::5,:]