コード例 #1
0
# Visualize ground-truth vs. predicted part labels for one model.
idx = opt.idx

print("model %d/%d" % (idx, len(d)))

# Fetch one sample: points (N, 3) and per-point segmentation labels.
point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

# Build a 10-entry RGB palette from the HSV colormap (drop the alpha channel).
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i)[:3] for i in range(10)])

# Ground-truth colors; labels are 1-based on disk, hence the -1 shift.
gt = cmap[seg.numpy() - 1, :]

# Restore the 4-part dense segmentation network and switch to eval mode.
classifier = PointNetDenseCls(k=4)
classifier.load_state_dict(torch.load(opt.model))
classifier.eval()

# Reshape to (1, 3, N): channels first, with a singleton batch axis.
point = point.transpose(1, 0).contiguous()
point = Variable(point.unsqueeze(0))

# Per-point class = argmax over the class dimension.
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]

# Predicted per-point colors, scaled to the 0-255 range.
pred_color = cmap[pred_choice.numpy()[0], :]
pred_color = pred_color * 255
コード例 #2
0
# Predict per-point part labels for one model using a checkpoint that was
# saved from an nn.DataParallel wrapper.
idx = opt.idx

print("model %d/%d" % (idx, len(d)))

# One sample: points (N, 3) and per-point segmentation labels.
point, seg = d[idx]
print(point.size(), seg.size())

point_np = point.numpy()

# 10-entry RGB palette from the HSV colormap (alpha channel dropped).
cmap = plt.cm.get_cmap("hsv", 10)
cmap = np.array([cmap(i)[:3] for i in range(10)])

# Ground-truth colors; labels are 1-based, hence the -1 shift.
gt = cmap[seg.numpy() - 1, :]

# The checkpoint keys carry a "module." prefix (saved from DataParallel),
# so wrap the model first, load on CPU, then unwrap the inner module.
classifier = PointNetDenseCls(k=4)
classifier = nn.DataParallel(classifier, device_ids=[0, 1])
classifier.load_state_dict(torch.load(opt.model, map_location='cpu'))
classifier = classifier.module
print('model is loaded successfully!!!')
classifier.eval()

# Reshape to (1, 3, N): channels first, singleton batch axis.
point = point.transpose(1, 0).contiguous()
point = Variable(point.unsqueeze(0))

# Per-point class = argmax over the class dimension.
pred, _ = classifier(point)
pred_choice = pred.data.max(2)[1]
print(pred_choice)

# Predicted per-point colors.
pred_color = cmap[pred_choice.numpy()[0], :]
コード例 #3
0
def main(argv=None):
    """Live RGB-D capture demo using an OpenNI2 camera.

    Streams RGB and depth side by side in an OpenCV window; on the 's'
    key it saves cropped RGB/depth images and a generated point cloud.
    A PointNet dense-segmentation model is loaded up front, but the code
    that would run it on captured clouds is currently commented out.
    ESC terminates the loop and releases the streams.
    """
    print('Hello! This is XXXXXX Program')

    ## Load PointNet config
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='./seg/seg_model_1.pth', help='model path')
    opt = parser.parse_args()
    print(opt)

    ## Load PointNet model
    num_points = 2700
    classifier = PointNetDenseCls(num_points=num_points, k=10)
    classifier.load_state_dict(torch.load(opt.model))
    classifier.eval()

    ### Config visualization
    # NOTE(review): the colormap is quantized to 5 colors but sampled at
    # indices 0-9 on the next line; sibling examples in this collection use
    # get_cmap("hsv", 10). Confirm whether 5 is intentional.
    cmap = plt.cm.get_cmap("hsv", 5)
    cmap = np.array([cmap(i) for i in range(10)])[:, :3]
    # gt = cmap[seg - 1, :]


    ## Initialize OpenNi
    # dist = './driver/OpenNI-Linux-x64-2.3/Redist'
    dist = './driver/OpenNI-Windows-x64-2.3/Redist'
    openni2.initialize(dist)
    if (openni2.is_initialized()):
        print("openNI2 initialized")
    else:
        print("openNI2 not initialized")

    ## Register the device
    dev = openni2.Device.open_any()

    ## Create the streams stream
    rgb_stream = dev.create_color_stream()
    depth_stream = dev.create_depth_stream()

    ## Define stream parameters
    w = 320
    h = 240
    fps = 30

    ## Configure the rgb_stream -- changes automatically based on bus speed
    rgb_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_RGB888, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Configure the depth_stream -- changes automatically based on bus speed
    # print 'Depth video mode info', depth_stream.get_video_mode() # Checks depth video configuration
    depth_stream.set_video_mode(
        c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=w, resolutionY=h,
                           fps=fps))

    ## Check and configure the mirroring -- default is True
    ## Note: I disable mirroring
    # print 'Mirroring info1', depth_stream.get_mirroring_enabled()
    depth_stream.set_mirroring_enabled(False)
    rgb_stream.set_mirroring_enabled(False)

    ## Start the streams
    rgb_stream.start()
    depth_stream.start()

    ## Synchronize the streams
    dev.set_depth_color_sync_enabled(True)  # synchronize the streams

    ## IMPORTANT: ALIGN DEPTH2RGB (depth wrapped to match rgb stream)
    dev.set_image_registration_mode(openni2.IMAGE_REGISTRATION_DEPTH_TO_COLOR)

    # Output directory tree for captures.
    # NOTE(review): subfolders are created only when the root is missing;
    # an existing root with missing subfolders is not repaired.
    saving_folder_path = './shapenetcore_partanno_segmentation_benchmark_v0/tools/'
    if not os.path.exists(saving_folder_path):
        os.makedirs(saving_folder_path+'RGB')
        os.makedirs(saving_folder_path+'D')
        os.makedirs(saving_folder_path+'PC')
        os.makedirs(saving_folder_path+'points')
        os.makedirs(saving_folder_path+'points_label')

    from config import CAMERA_CONFIG

    ## main loop
    s = 1000  # snapshot counter, used in the saved file names
    done = False
    while not done:
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            print("\ts key detected. Saving image {}".format(s))

            # NOTE(review): rgb/dmap are produced at the bottom of the loop,
            # so this branch uses the previous iteration's frames; pressing
            # 's' on the very first iteration would raise NameError.
            # Crop the central 160x120 region before saving.
            rgb = rgb[60:180, 80:240, :]
            dmap = dmap[60:180, 80:240]
            ply_content, points_content = generate_ply_from_rgbd(rgb=rgb, depth=dmap, config=CAMERA_CONFIG)

            cv2.imwrite(saving_folder_path + "RGB/" + str(s) + '.png', rgb)
            cv2.imwrite(saving_folder_path + "D/" + str(s) + '.png', dmap)
            print(rgb.shape, dmap.shape)
            print(type(rgb), type(dmap))
            with open(saving_folder_path + "PC/" + str(s) + '.ply', 'w') as output:
                output.write(ply_content)
            print(saving_folder_path + "PC/" + str(s) + '.ply', ' done')
            s += 1  # uncomment for multiple captures

            # ### Get pointcloud of scene for prediction
            # points_np = (np.array(points_content)[:, :3]).astype(np.float32)
            # choice = np.random.choice(len(points_np), num_points, replace=True)
            # points_np = points_np[choice, :]
            # points_torch = torch.from_numpy(points_np)
            #
            # points_torch = points_torch.transpose(1, 0).contiguous()
            #
            # points_torch = Variable(points_torch.view(1, points_torch.size()[0], points_torch.size()[1]))
            #
            # ### Predict to segment scene
            # pred, _ = classifier(points_torch)
            # pred_choice = pred.data.max(2)[1]
            # print(pred_choice)

        ## Streams
        # RGB
        rgb = get_rgb(rgb_stream=rgb_stream, h=h, w=w)

        # DEPTH
        dmap, d4d = get_depth(depth_stream=depth_stream, h=h, w=w)

        # canvas
        canvas = np.hstack((rgb, d4d))
        ## Display the stream syde-by-side
        cv2.imshow('depth || rgb', canvas)
    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
コード例 #4
0
def main(argv=None):
    """Train PointNet dense segmentation on the 'pipe' class.

    Relies on a module-level ``opt`` namespace (batchSize, workers, model,
    nepoch, outf); the ``argv`` parameter is unused. Saves one checkpoint
    per epoch to ``opt.outf``.
    """
    print('Hello! This is XXXXXX Program')

    num_points = 2048
    dataset = PartDataset(root='DATA/ARLab/objects',
                          npoints=num_points,
                          classification=False,
                          class_choice=['pipe'])
    # dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=False, num_workers=int(opt.workers))
    dataloader = DataLoader(dataset,
                            batch_size=opt.batchSize,
                            shuffle=False,
                            num_workers=int(opt.workers))

    # NOTE(review): the test set uses the same root and class choice as the
    # training set, so train and test data are identical — confirm intended.
    test_dataset = PartDataset(root='DATA/ARLab/objects',
                               npoints=num_points,
                               classification=False,
                               class_choice=['pipe'])
    testdataloader = torch.utils.data.DataLoader(test_dataset,
                                                 batch_size=opt.batchSize,
                                                 shuffle=True,
                                                 num_workers=int(opt.workers))

    num_classes = dataset.num_seg_classes

    # ANSI escape wrapper: render x in blue on the terminal.
    blue = lambda x: '\033[94m' + x + '\033[0m'

    classifier = PointNetDenseCls(num_points=num_points, k=num_classes)

    # Optionally warm-start from a checkpoint path.
    if opt.model != '':
        classifier.load_state_dict(torch.load(opt.model))

    optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
    classifier.cuda()

    # Float division; used only in the %d progress prints below.
    num_batch = len(dataset) / opt.batchSize

    for epoch in range(opt.nepoch):
        for i, data in enumerate(dataloader, 0):
            points, target = data
            points, target = Variable(points), Variable(target)
            points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), channels first
            points, target = points.cuda(), target.cuda()
            optimizer.zero_grad()
            print(points.shape)
            pred, _ = classifier(points)
            pred = pred.view(-1, num_classes)
            # Labels are 1-based on disk; shift to 0-based for NLL loss.
            target = target.view(-1, 1)[:, 0] - 1
            # print(pred.size(), target.size())
            loss = F.nll_loss(pred, target)
            loss.backward()
            optimizer.step()
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            print('[%d: %d/%d] train loss: %f accuracy: %f' %
                  (epoch, i, num_batch, loss.item(),
                   correct.item() / float(list(target.shape)[0])))

            # Periodic evaluation on a single test batch.
            # NOTE(review): the model stays in train mode and gradients are
            # not disabled here — consider eval()/no_grad for a true eval.
            if i % 10 == 0:
                # next(enumerate(...)) restarts the loader each time, so this
                # always takes the first (shuffled) batch the loader yields.
                j, data = next(enumerate(testdataloader, 0))
                points, target = data
                points, target = Variable(points), Variable(target)
                points = points.transpose(2, 1)
                points, target = points.cuda(), target.cuda()
                pred, _ = classifier(points)
                pred = pred.view(-1, num_classes)
                target = target.view(-1, 1)[:, 0] - 1

                loss = F.nll_loss(pred, target)
                pred_choice = pred.data.max(1)[1]
                correct = pred_choice.eq(target.data).cpu().sum()
                print('[%d: %d/%d] %s loss: %f accuracy: %f' %
                      (epoch, i, num_batch, blue('test'), loss.item(),
                       correct.item() / float(list(target.shape)[0])))

        # Checkpoint after every epoch.
        torch.save(classifier.state_dict(),
                   '%s/seg_model_%d.pth' % (opt.outf, epoch))
コード例 #5
0
# Dataset summary and output directory setup for segmentation training.
print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)
except OSError:
    # Directory already exists (or cannot be created) — continue either way.
    pass

# ANSI escape wrapper: render x in blue on the terminal.
blue = lambda x:'\033[94m' + x + '\033[0m'


# Dense classifier: one segmentation class per point.
classifier = PointNetDenseCls(k = num_classes)

# Optionally warm-start from a checkpoint path.
if opt.model != '':
    classifier.load_state_dict(torch.load(opt.model))

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

# Float division; used for progress reporting.
num_batch = len(dataset)/opt.batchSize

# Training loop (body continues beyond this excerpt).
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader, 0):
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2,1)  # (B, N, 3) -> (B, 3, N), channels first
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()
        # train() returns the module itself; this also enables train mode.
        classifier = classifier.train()
        pred, _ = classifier(points)
コード例 #6
0
from pointnet import PointNetDenseCls


# Evaluation settings for the Chair part-segmentation model.
model_path = 'seg/weights/seg_model_9.pth'
output_path = 'seg/output/seg_model_9'
class_choice = 'Chair'
num_points = 2500
test_dataset = PartDataset(root='shapenetcore_partanno_segmentation_benchmark_v0',
                           classification=False, class_choice=[class_choice], train=False)
testdataloader = torch.utils.data.DataLoader(test_dataset, batch_size=32,
                                             shuffle=False, num_workers=4)

# Restore the trained model on GPU and switch to eval mode.
num_classes = test_dataset.num_seg_classes
classifier = PointNetDenseCls(k=num_classes)
classifier.cuda()
classifier.load_state_dict(torch.load(model_path))
classifier.eval()

# Accumulators for per-point predictions and ground truth.
# NOTE(review): preds is never appended within this excerpt — presumably
# done just below the visible code; verify against the full file.
preds = []
labels = []

for data in testdataloader:
    with torch.no_grad():
        points, target = data
        points = points.transpose(2, 1)  # (B, N, 3) -> (B, 3, N), channels first
        points, target = points.cuda(), target.cuda()
        pred, _ = classifier(points)
        pred = pred.view(-1, num_classes)
        # Labels are 1-based on disk; shift to 0-based.
        target = target.view(-1) - 1
        labels.append(target.data)
        pred = pred.data.max(1)[1].view(-1)
コード例 #7
0
# Compare segmentation / classification / multi-task networks on a sample
# and on an artificially damaged (partially removed) copy of it.
idx = opt.idx

print("model %d/%d" % (idx, len(dataset)))

# One sample: points, object class, and per-point segmentation labels.
point, cls, seg = dataset[idx]
# Remove part of the cloud; pt_idx indexes the surviving points, so the
# ground-truth labels can be subset to match.
damaged_point, pt_idx = get_damaged_points(point.numpy())
damaged_point = torch.from_numpy(damaged_point)
damaged_seg = seg[pt_idx]

original_point = point.numpy()
damaged_point_np = damaged_point.numpy()
print(point.size(), seg.size())

print('loading segmentation network for damaged data')
seg_classifier = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier.load_state_dict(torch.load(opt.seg_model))

print('loading classification network for damaged data')
cls_classifier = PointNetCls(k=len(dataset.classes))
cls_classifier.load_state_dict(torch.load(opt.cls_model))

print('loading multi-task network for damaged data')
mt_classifier = PointNetMultiTask(cls_k=len(dataset.classes),
                                  seg_k=dataset.num_seg_classes)
mt_classifier.load_state_dict(torch.load(opt.mt_model))

print('loading segmentation network for non-damaged data')
seg_classifier_all = PointNetDenseCls(k=dataset.num_seg_classes)
seg_classifier_all.load_state_dict(torch.load(opt.seg_all_model))

# Loading of the non-damaged classification network continues beyond
# this excerpt.
print('loading classification network for non-damaged data')
コード例 #8
0
File: train_segmentation.py  Project: yl3800/pointnet
                                             num_workers=int(opt.workers))

# Dataset summary and output directory setup (train_segmentation.py excerpt).
print(len(dataset), len(test_dataset))
num_classes = dataset.num_seg_classes
print('classes', num_classes)
try:
    os.makedirs(opt.outf)  # create the output directory
except OSError:
    pass

# ANSI escape wrapper: render x in blue on the terminal.
blue = lambda x: '\033[94m' + x + '\033[0m'

classifier = PointNetDenseCls(k=num_classes)  # instantiate the model; every point gets a class label

if opt.model != '':
    classifier.load_state_dict(torch.load(
        opt.model))  # load pretrained weights: the_model = TheModelClass(*args, **kwargs)
    #                 then the_model.load_state_dict(torch.load(PATH)); opt.model is a path

optimizer = optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
classifier.cuda()

num_batch = len(dataset) / opt.batchSize  # number of batches the data is split into

# Training loop (body continues beyond this excerpt).
for epoch in range(opt.nepoch):
    for i, data in enumerate(dataloader,
                             0):  # the 0 in enumerate(dataloader, start=0) can be omitted
        points, target = data
        points, target = Variable(points), Variable(target)
        points = points.transpose(2, 1)  # channels first; original comment questioned the shape (n x 3 x 1? n x 1 x 3)
        points, target = points.cuda(), target.cuda()
        optimizer.zero_grad()  # reset gradients to zero