def main(argv=None):
    print('Hello! This is XXXXXX Program')

    trn_ds = PartDataset(root=opt.directory, npoints=opt.num_points, classification=False, class_choice=['pipe'])
    val_ds = PartDataset(root=opt.directory, npoints=opt.num_points, classification=False, class_choice=['pipe'], train=False)
    num_classes = trn_ds.num_seg_classes

    trn_dl = DataLoader(trn_ds, batch_size=opt.batch_size, shuffle=True, num_workers=0, pin_memory=True)
    val_dl = DataLoader(val_ds, batch_size=32, shuffle=False, num_workers=0, pin_memory=True)

    data = ModelData(opt.directory, trn_dl, val_dl)

    classifier = PointNetDenseCls(num_points=opt.num_points, k=num_classes)

    learn = ConvLearner.from_model_data(classifier.cuda(), data=data)
    learn.crit = nn.CrossEntropyLoss()
    learn.metrics = [accuracy]

    learn.clip = 1e-1
    learn.fit(1.5, 1, wds=1e-4, cycle_len=20, use_clr_beta=(12, 15, 0.95, 0.85))

    preds, targs = learn.TTA()
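    # A hedged follow-up, assuming fastai 0.7 semantics (TTA() returns
    # per-augmentation log-probabilities plus targets) and numpy imported
    # as np: average over augmentations, then score per-point accuracy.
    # If targets are 1-based ShapeNet part labels, shift them by -1 first.
    probs = np.mean(np.exp(preds), axis=0)
    acc = (np.argmax(probs, axis=-1) == targs).mean()
    print('TTA accuracy:', acc)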
Example no. 2
test_dataset = PartDataset(classification=False, class_choice=['Chair'], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'


classifier = PointNetDenseCls(k=7)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset)/opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader):
        # label_filename = "{}/{}.txt".format('/home/emeka/Schreibtisch/AIS/ais3d/PCD_Files/Labeled', dataset.ids[i])
        # if not os.path.isfile(label_filename):
        #     print('PASS')
        #     continue
Example no. 3
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    num_points = 2048
    dataset = PartDataset(root='DATA/ARLab/objects',
                          npoints=num_points,
                          classification=False,
                          class_choice=['pipe'])
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
    dataloader = DataLoader(dataset,
                            batch_size=opt.batchSize,
                            shuffle=False,
                            num_workers=int(opt.workers))

    test_dataset = PartDataset(root='DATA/ARLab/objects',
                               npoints=num_points,
                               classification=False,
                               class_choice=['pipe'])
    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchSize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))

    num_classes = dataset.num_seg_classes

    blue = lambda x: '\033[94m' + x + '\033[0m'

    classifier = PointNetDenseCls(num_points=num_points, k=num_classes)

    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))

    optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    classifier.cuda()

    num_batch = len(dataset) / opt.batchSize

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            points, target = Variable(points), Variable(target)
            points = points.transpose(2, 1)
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            print(points.shape)
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
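            # ShapeNet part labels are 1-based; shift to 0-based ids for NLL loss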
            target = target.view(-1, 1)[:, 0] - 1
            # print(pred.size(), target.size())
            loss = F.nll_loss(pred, target)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' %
                  (epoch, i, num_batch, loss.item(),
                   correct.item() / float(list(target.shape)[0])))

            if i % 10 == 0:
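                # evaluate on one batch pulled from a fresh test iterator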
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                points, target = Variable(points), Variable(target)
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1

                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                      (epoch, i, num_batch, blue('test'), loss.item(),
                       correct.item() / float(list(target.shape)[0])))

        torch.save(classifier.state_dict(),
                   '%s/seg_model_%d.pth' % (opt.outf, epoch))
Example no. 4
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0', classification=False, class_choice=['Chair'], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'


classifier = PointNetDenseCls(k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset)/opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
Example no. 5
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

classifier = PointNetDenseCls(k=4)  # k=4 corresponds to the Chair category
classifier.load_state_dict(torch.load(opt.model))

point = point.transpose(1, 0).contiguous()

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)

pred_choice = pred.data.max(2)[1][0, :, 0]
#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy(), :]

#print(pred_color.shape)

showpoints(point_np, gt, pred_color)
Example no. 6
def main(argv=None):
    print('Hello! This is XXXXXX Program')

    ## Load PointNet config
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='./seg/seg_model_1.pth', help='model path')
    opt = parser.parse_args()
    print(opt)

    ## Load PointNet model
    num_points = 2700
    classifier = PointNetDenseCls(num_points=num_points, k=10)
    classifier.load_state_dict(torch.load(opt.model))
    classifier.eval()

    ### Config visualization
    cmap = plt.cm.get_cmap("hsv", 5)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]
    # gt = cmap[seg - 1, :]


    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if openni2.is_initialized():
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path+'RGB')
        os.makedirs(saving_folder_path+'D')
        os.makedirs(saving_folder_path+'PC')
        os.makedirs(saving_folder_path+'points')
        os.makedirs(saving_folder_path+'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))


            rgb = rgb[60:180, 80:240, :]
            dmap = dmap[60:180, 80:240]
            ply_content, points_content = generate_ply_from_rgbd(rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply', 'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # uncomment for multiple captures

            # ### Get pointcloud of scene for prediction
            # points_np = (np.array(points_content)[:, :3]).astype(np.float32)
            # choice = np.random.choice(len(points_np), num_points, replace=True)
            # points_np = points_np[choice, :]
            # points_torch = torch.from_numpy(points_np)
            #
            # points_torch = points_torch.transpose(1, 0).contiguous()
            #
            # points_torch = Variable(points_torch.view(1, points_torch.size()[0], points_torch.size()[1]))
            #
            # ### Predict to segment scene
            # pred, _ = classifier(points_torch)
            # pred_choice = pred.data.max(2)[1]
            # print(pred_choice)

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        ## Display the streams side-by-side
        cv2.imshow('depth || rgb', canvas)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example no. 7
                class_choice=opt.cat,
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point_np, seg = d[idx]
point = torch.from_numpy(point_np)
point_np[:, 2] *= -1

cmap = plt.cm.get_cmap("hsv", 5)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg - 1, :]

classifier = PointNetDenseCls(num_points=num_points, k=num_classes)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

point = point.transpose(1, 0).contiguous()

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)
# pred = pred.view(-1, num_classes)
pred_choice = pred.data.max(2)[1]
print(pred_choice)
correct = pred_choice.eq(torch.from_numpy(seg - 1)).cpu().sum()
print('Percent: {}'.format(float(correct) / num_points))

#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]
Example no. 8
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

classifier = PointNetDenseCls(k=4)
classifier = nn.DataParallel(classifier, device_ids=[0, 1])
classifier.load_state_dict(torch.load(opt.model, map_location='cpu'))
classifier = classifier.module
print('model is loaded successfully!!!')
classifier.eval()
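# Note: wrapping in DataParallel only to consume 'module.'-prefixed weights can
# be avoided; a hedged alternative is to strip the prefix from the checkpoint:
#   state = torch.load(opt.model, map_location='cpu')
#   state = {k.replace('module.', '', 1): v for k, v in state.items()}
#   classifier.load_state_dict(state)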

point = point.transpose(1, 0).contiguous()

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
print(pred_choice)

#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]
Example no. 9
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
# classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)
        # points, target = points.cuda(), target.cuda()
Example no. 10
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)  # create the output directory
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes)  # instantiate the model; each point gets a class label

if opt.model != '':
    # Load pretrained weights, i.e. the_model = TheModelClass(*args, **kwargs);
    # the_model.load_state_dict(torch.load(PATH)). opt.model is the path.
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize  # number of batches the data splits into

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):  # the start=0 argument to enumerate is optional
        points, target = data
Example no. 11
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0', classification=False, class_choice=['Chair'], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

classifier = PointNetDenseCls(k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
classifier.cuda()

num_batch = len(dataset)/opt.batchSize
learning_rate = 0.01

for epoch in range(opt.nepoch):
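    # Note: constructing a fresh SGD each epoch resets its momentum buffers;
    # hoisting the optimizer above the loop (or using an LR scheduler) keeps them.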
    optimizer = optim.SGD(classifier.parameters(), lr=learning_rate, momentum=0.9)
    for i, data in enumerate(testdataloader, 0):
        points, target, names = data
        
        points, target = Variable(points), Variable(target)
        points = points.transpose(2,1) 
Example no. 12
                            batch_size=opt.batchSize,
                            shuffle=True,
                            num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

ctx = mx.gpu()
classifier = PointNetDenseCls(k=num_classes, routing=1)
classifier.initialize(ctx=ctx)

if opt.model != '':
    classifier.load_parameters(opt.model)

optimizer = Trainer(classifier.collect_params(), 'sgd', {
    'learning_rate': 0.001,
    'momentum': 0.9
})
L_loss = loss.SoftmaxCrossEntropyLoss()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    correct = 0.
Example no. 13
idx = opt.idx

print("model %d/%d" %( idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()



cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:,:3]
gt = cmap[seg.numpy() - 1, :]

classifier = PointNetDenseCls(k=4)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

point = point.transpose(1, 0).contiguous()

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
print(pred_choice)

#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]

#print(pred_color.shape)
showpoints(point_np, gt, pred_color)
Example no. 14
# Setup training and validation data loaders.
flag_data_augmentation = False
dataset = HDF5Dataset(hdf5_file, data_splitter, 'training', flag_data_augmentation=flag_data_augmentation)

dataloader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)

NUM_POINTS = 40000
print(len(dataset))
num_classes = 2  # 3  # dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outdir)
except OSError:
    pass

classifier = PointNetDenseCls(k=num_classes, num_points=NUM_POINTS)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=opt.lr, momentum=0.9)
classifier.cuda()
fig = plt.figure(1)
num_batch = len(dataset) / opt.batchSize
print('num_batch =', num_batch)

loss_train = []

for epoch in range(opt.nepoch):

    # adjust_learning_rate(optimizer, epoch)
Example no. 15
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

opt.devices = list(map(int, opt.devices))  # map() is lazy in Python 3; device_ids needs a list
print(opt.devices)
classifier = PointNetDenseCls(k=num_classes)
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
    print("Finished loading pretrained model")
classifier = nn.DataParallel(classifier, device_ids=opt.devices)

optimizer = optim.SGD(classifier.parameters(),
                      lr=opt.lr,
                      momentum=opt.momentum)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize
miou_list = list()
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
Example no. 16
test_dataset = PartDataset(root=os.path.join(opt.input_path, 'test'),
                           task='segmentation',
                           mode='test',
                           npoints=opt.num_points,
                           min_pts=0,
                           load_in_memory=True,
                           num_seg_class=5)
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=False,
                                             num_workers=opt.workers)
num_batch = len(test_dataset) / opt.batchSize

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

classifier = PointNetDenseCls(k=test_dataset.num_seg_classes).to(device)

classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

total_test_correct = 0
n_log = 100

total_points = 0
for i, data in enumerate(test_dataset):
    point, target = data
    point, target = point.to(device), target.to(device)
    point = point.view(1, point.size(0), point.size(1))
    target = target.view(1, target.size(0))
    point = point.transpose(2, 1)
    pred, _ = classifier(point)
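    # (excerpt truncated) the total_test_correct / total_points counters above
    # suggest the loop continues with accuracy bookkeeping, e.g. (hypothetical):
    #   pred_choice = pred.view(-1, test_dataset.num_seg_classes).data.max(1)[1]
    #   total_test_correct += pred_choice.eq(target.view(-1)).sum().item()
    #   total_points += target.numel()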
Example no. 17
idx = opt.idx

print("model %d/%d" % (idx, len(dataset)))

point, cls, seg = dataset[idx]
damaged_point, pt_idx = get_damaged_points(point.numpy())
damaged_point = torch.from_numpy(damaged_point)
damaged_seg = seg[pt_idx]

original_point = point.numpy()
damaged_point_np = damaged_point.numpy()
print(point.size(), seg.size())

print('loading segmentation network for damaged data')
seg_classifier = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier.load_state_dict(torch.load(opt.seg_model))

print('loading classification network for damaged data')
cls_classifier = PointNetCls(k=len(dataset.classes))
cls_classifier.load_state_dict(torch.load(opt.cls_model))

print('loading multi-task network for damaged data')
mt_classifier = PointNetMultiTask(cls_k=len(dataset.classes),
                                  seg_k=dataset.num_seg_classes)
mt_classifier.load_state_dict(torch.load(opt.mt_model))

print('loading segmentation network for non-damaged data')
seg_classifier_all = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier_all.load_state_dict(torch.load(opt.seg_all_model))
Example no. 18
testdataloader = torch.utils.data.DataLoader(test_dataset,
                                             batch_size=opt.batchSize,
                                             shuffle=True,
                                             num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes, num_points=2500)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize

for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)
Example no. 19
    train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.shape, seg.shape)

point_np = point.asnumpy()
ctx = mx.gpu()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.asnumpy().astype(np.uint8) - 1, :]

classifier = PointNetDenseCls(k=4)
classifier.load_parameters(opt.model, ctx=ctx)

point = nd.expand_dims(point.transpose((1, 0)), axis=0)

pred, _ = classifier(point.as_in_context(ctx))
pred_choice = pred.argmax(2)
print(pred_choice)

#print(pred_choice.size())
pred_color = cmap[pred_choice.asnumpy().astype(np.uint8)[0], :]

#print(pred_color.shape)
showpoints(point_np, gt, pred_color)
Example no. 20
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0', npoints=num_points, classification=False, class_choice=['Chair'], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print(len(dataset), len(test_dataset))
num_classes = 10
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    pass

blue = lambda x: '\033[94m' + x + '\033[0m'


classifier = PointNetDenseCls(num_points=num_points, k=num_classes)

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset)/opt.batchSize

max_acc = -1
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)
Example no. 21
import torch.utils.data
from datasets import PartDataset
from pointnet import PointNetDenseCls


model_path = 'seg/weights/seg_model_9.pth'
output_path = 'seg/output/seg_model_9'
class_choice = 'Chair'
num_points = 2500
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           classification=False, class_choice=[class_choice], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=32,
                                             shuffle=False, num_workers=4)

num_classes = test_dataset.num_seg_classes
classifier = PointNetDenseCls(k=num_classes)
classifier.cuda()
classifier.load_state_dict(torch.load(model_path))
classifier.eval()

preds = []
labels = []

for data in testdataloader:
    with torch.no_grad():
        points, target = data
        points = points.transpose(2, 1)
        points, target = points.cuda(), target.cuda()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        target = target.view(-1) - 1
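        # (excerpt truncated) the preds/labels lists above suggest collecting
        # per-point results here, e.g. (hypothetical):
        #   preds.append(pred.data.max(1)[1].cpu())
        #   labels.append(target.cpu())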
Example no. 22
                train=False)

idx = opt.idx

print("model %d/%d" % (idx, len(d)))

point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i) for i in range(10)])[:, :3]
gt = cmap[seg.numpy() - 1, :]

classifier = PointNetDenseCls(k=4)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

point = point.transpose(1, 0).contiguous()

point = Variable(point.view(1, point.size()[0], point.size()[1]))
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
# print(pred_choice)

#print(pred_choice.size())
pred_color = cmap[pred_choice.numpy()[0], :]

# print(point_np.shape)
# print(pred_color.shape)
Example no. 23
                          npoints=opt.num_points,
                          min_pts=0,
                          num_seg_class=5,
                          load_in_memory=True)
valdataloader = torch.utils.data.DataLoader(val_dataset, batch_size=opt.batchSize,
                                          shuffle=True, num_workers=int(opt.workers))

print('train: {} test: {}'.format(len(dataset), len(val_dataset)))
num_classes = dataset.num_seg_classes
print('classes', num_classes)


blue = lambda x: '\033[94m' + x + '\033[0m'


classifier = PointNetDenseCls(k=num_classes).to(device)

start_epoch = -1

if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))
    # TODO update start_epoch from pre-trained


optimizer = optim.SGD(params=filter(lambda p: p.requires_grad, classifier.parameters()),
                      lr=0.01,
                      momentum=0.9)
lambda_lr = lambda epoch: 1 / (1 + (opt.lr_decay_rate * epoch))
lr_scheduler = LambdaLR(optimizer, lr_lambda=lambda_lr, last_epoch=start_epoch)

num_batch = len(dataset)/opt.batchSize
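
The epoch loop is cut off in this excerpt. A minimal sketch (hypothetical loop body elided) of how the LambdaLR schedule above is typically advanced:

for epoch in range(start_epoch + 1, opt.nepoch):
    # ... forward/backward passes and optimizer.step() over the dataloader ...
    lr_scheduler.step()  # applies the 1 / (1 + lr_decay_rate * epoch) decay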