Example #1
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    num_classes = 3
    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNetPointWise(num_points=num_points,
                                   num_classes=num_classes)
    classifier.load_state_dict(torch.load(pretrained_model))
    classifier.eval()
    '''
    Setup camera
    '''
    fps = 30
    # Frame size and crop area [h1:h2, w1:w2]
    w, h, h1, h2, w1, w2 = 640, 480, 140, 340, 200, 440

    rgb_stream, depth_stream = setup_camera(w=w, h=h, fps=fps)
    '''
    Record
    '''
    done = False
    while not done:
        # Grab the current frames first so rgb/dmap are defined before
        # the keystroke handlers below use them.
        rgb, dmap = display_stream(rgb_stream=rgb_stream,
                                   depth_stream=depth_stream,
                                   h=h,
                                   w=w,
                                   crop=((w1, h1), (w2, h2)))
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            # print("\ts key detected. Saving image {}".format(s))
            '''
            Get data
            '''
            # rgb = rgb[h1:h2, w1:w2, :]
            # dmap = dmap[h1:h2, w1:w2]
            # pc_scene = rgbd2pc(rgb=rgb, depth=dmap, center_x=(w1+w2)/2, center_y=(h1+h2)/2, focal_length=889)
            # pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)
            # rgbd2pc(rgb, depth=dmap, center_x=325.5, center_y=255.5, focal_length=572, scale=2000)
            xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, scale=1000)
            xyzrgbrc = xyzrgbrc[h1:h2, w1:w2, :]  # crop the interested area
            pc_scene = xyzrgb2pc(xyzrgb=xyzrgbrc)

            # ply = generate_ply(pc_scene)
            # with open('./tmp/abc' + '.ply', 'w') as output:
            #     output.write(ply)

            pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)

            # pc_scene = (np.array(pc_scene)[:, :3]).astype(np.float32)
            '''
            Predict and Segment objects
            '''
            pred_labels = predict_segmentation(classifier=classifier,
                                               input_points=pc_scene)
            visualize(x=pc_scene[:, 0],
                      y=pc_scene[:, 1],
                      z=pc_scene[:, 2],
                      label=pred_labels,
                      point_radius=0.0008)  # visualize predicted results
            pipe_points, wrench_points = visualize_segmented_objects(
                scene_points=pc_scene,
                labels=pred_labels)  # visualize segmented objects
            '''
            Surface-Matching
            '''
            # Convert numpy array to point cloud type
            pc_model = load_ply(path='./models/1.ply', num_points=-1)

            # pc_scene = wrench_points
            visualize(x=pc_model[:, 0],
                      y=pc_model[:, 1],
                      z=pc_model[:, 2],
                      label=np.ones(len(pc_model)),
                      point_radius=0.0008)
            visualize(x=pipe_points[:, 0],
                      y=pipe_points[:, 1],
                      z=pipe_points[:, 2],
                      label=np.ones(len(pipe_points)),
                      point_radius=0.0008)

            result_icp = match_surface(model_points=pc_model,
                                       object_points=pipe_points)

            # Transformation (rotation angles)
            print('Theta x, Theta y, Theta z (in degrees):')
            print(
                rotationMatrixToEulerAngles(result_icp.transformation[:3, :3])
                / np.pi * 180)

    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #2
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    num_classes = 3
    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNetPointWise(num_points=num_points,
                                   num_classes=num_classes)
    classifier.load_state_dict(torch.load(pretrained_model))
    classifier.eval()
    '''
    Setup camera
    '''
    fps = 30
    w, h, h1, h2, w1, w2 = 640, 480, 150, 330, 210, 430  # crop area [h1:h2, w1:w2]

    rgb_stream, depth_stream = setup_camera(w=w, h=h, fps=fps)

    # Load model
    pc_model = load_ply(path='./models/model13_1.ply', num_points=-1)
    '''
    Record
    '''
    done = False
    while not done:
        # Grab the current frames first so rgb/dmap are defined before
        # the keystroke handlers below use them.
        rgb, dmap = display_stream(rgb_stream=rgb_stream,
                                   depth_stream=depth_stream,
                                   h=h,
                                   w=w,
                                   crop=((w1, h1), (w2, h2)))
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            '''
            Get data
            '''
            # sTime = time.time()
            # xyzrgbrc1 = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, scale=1000)
            # xyzrgbrc1 = xyzrgbrc1[h1:h2, w1:w2, :] # crop the interested area
            # print(time.time() - sTime)

            sTime = time.time()
            xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb[h1:h2, w1:w2, :],
                                     depth=dmap[h1:h2, w1:w2],
                                     scale=1000)
            print('Converting time from rgb-d to xyz: ', time.time() - sTime)

            pc_scene = xyzrgb2pc(xyzrgb=xyzrgbrc)
            pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)
            '''
            Predict and Segment objects
            '''
            pred_labels = predict_segmentation(classifier=classifier,
                                               input_points=pc_scene)
            visualize(x=pc_scene[:, 0],
                      y=pc_scene[:, 1],
                      z=pc_scene[:, 2],
                      label=pred_labels,
                      point_radius=0.0008)  # visualize predicted results
            pipe_points, wrench_points = visualize_segmented_objects(
                scene_points=pc_scene,
                labels=pred_labels)  # visualize segmented objects
            '''
            Surface-Matching
            '''
            # pc_scene = wrench_points
            visualize(x=pc_model[:, 0],
                      y=pc_model[:, 1],
                      z=pc_model[:, 2],
                      label=np.ones(len(pc_model)),
                      point_radius=0.0008)
            visualize(x=pipe_points[:, 0],
                      y=pipe_points[:, 1],
                      z=pipe_points[:, 2],
                      label=np.ones(len(pipe_points)),
                      point_radius=0.0008)

            result_icp = match_surface(model_points=pc_model,
                                       object_points=pipe_points)

            # Transformation (rotation angles)
            print('Theta x, Theta y, Theta z (in degrees):')
            print(
                rotationMatrixToEulerAngles(result_icp.transformation[:3, :3])
                / np.pi * 180)

    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #3
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    num_classes = 4

    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNet(num_points=num_points, num_classes=num_classes)
    classifier.load_weights(filepath='./tmp/model.acc.53.hdf5')

    '''
    Setup camera
    '''
    fps = 30
    w, h, h1, h2, w1, w2 = 640, 480, 150, 330, 210, 430  # crop area [h1:h2, w1:w2]

    rgb_stream, depth_stream = setup_camera(w=w, h=h, fps=fps)

    # Load model
    pc_models = []
    centroid_models = []
    files = os.listdir('./models/pipe/pc/')
    for i, file in enumerate(files):
        pc = load_ply(path='./models/pipe/pc/' + file, num_points=-1)
        centroid = np.mean(pc, axis=0)

        pc_models.append(pc)
        centroid_models.append(centroid)

    '''
    Record
    '''
    done = False
    while not done:
        # Grab the current frames first so rgb/dmap are defined before
        # the keystroke handlers below use them.
        rgb, dmap = display_stream(rgb_stream=rgb_stream, depth_stream=depth_stream, h=h, w=w, crop=((w1, h1), (w2, h2)))
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            '''
            Get data
            '''
            # sTime = time.time()
            # xyzrgbrc1 = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, scale=1000)
            # xyzrgbrc1 = xyzrgbrc1[h1:h2, w1:w2, :] # crop the interested area
            # print(time.time() - sTime)

            sTime = time.time()
            xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb[h1:h2, w1:w2, :], depth=dmap[h1:h2, w1:w2], scale=1000)
            print('Converting time from rgb-d to xyz: ', time.time() - sTime)

            sTime = time.time()
            pc_scene = xyzrgb2pc(xyzrgb=xyzrgbrc)
            print('Converting time from xyz to point cloud: ', time.time() - sTime)

            pipe = []
            for i in range(2):
                pc_scene = sample_data(point_cloud=pc_scene, num_points=num_points)

                '''
                Predict and Segment objects
                '''
                pred_labels = predict_segmentation(classifier=classifier, input_points=pc_scene)
                # visualize(x=pc_scene[:, 0], y=pc_scene[:, 1], z=pc_scene[:, 2], label=pred_labels,
                #           point_radius=0.0008)  # visualize predicted results
                pipe_points, wrench_points = visualize_segmented_objects(scene_points=pc_scene,
                                                                         labels=pred_labels)  # visualize segmented objects
                if len(pipe_points) > 0:
                    pipe.append(pipe_points)
            if len(pipe) == 0: continue  # nothing segmented in either pass
            pipe = np.concatenate(pipe, axis=0)
            visualize(x=pipe[:, 0], y=pipe[:, 1], z=pipe[:, 2], label=np.ones(len(pipe)), point_radius=0.0008)

            if len(pipe) < 100: continue
            result = []
            for i in range(len(centroid_models)):
                print('Attempt fitting model: ', i)
                pc_model = pc_models[i]
                centroid_model = centroid_models[i]

                '''
                Surface-Matching
                '''
                # pc_scene = wrench_points
                # visualize(x=pc_model[:, 0], y=pc_model[:, 1], z=pc_model[:, 2], label=np.ones(len(pc_model)), point_radius=0.0008)
                # visualize(x=pipe[:, 0], y=pipe[:, 1], z=pipe[:, 2], label=np.ones(len(pipe)), point_radius=0.0008)

                result_icp, result_ransac = match_surface(model_points=pc_model, object_points=pipe)

                if (result == []) or (result_ransac.fitness > result[0].fitness):
                    result = [result_ransac, result_icp, centroid_model, i]

            if result == []: continue
            # Transformation (rotation angles)
            print('Best fitness: ')
            print(result[0].fitness, result[3])
            print('Theta x, Theta y, Theta z (in degrees):')
            print(rotationMatrixToEulerAngles(result[1].transformation[:3, :3]) / np.pi * 180)
            print('Translation (in meters):')
            print(result[1].transformation[:3, 3], result[2])
            source = PointCloud()
            source.points = Vector3dVector(pc_models[result[3]])
            target = PointCloud()
            target.points = Vector3dVector(pipe)
            draw_registration_result(source, target, result[1].transformation)

            trans_vector = centroid_models[result[3]] - centroid_models[10] + result[1].transformation[:3, 3]
            grasp_pose = np.array([171.14, 271.31, -16]) + trans_vector  # computed but not used here; Example #4 sends the pose to the robot


    # end while

    ## Release resources
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #4
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    num_classes = 4
    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNet(num_points=num_points, num_classes=num_classes)
    classifier.load_weights(filepath='./tmp/model.acc.53.hdf5')
    '''
    Setup camera
    '''
    fps = 30
    w, h, h1, h2, w1, w2 = 640, 480, 150, 330, 210, 430  # crop area [h1:h2, w1:w2]

    rgb_stream, depth_stream = setup_camera(w=w, h=h, fps=fps)

    from threefinger_command_sender_v0200 import ROBOTIQ, ThreeFingerModes
    from robot_command_sender_v0200 import TX60

    tx60 = TX60()
    tx60.enable()
    tx60.move_point(x=-400, y=40, z=60, rx=-180, ry=0, rz=0)
    tx60.move_dimension(z=20)
    robotiq = ROBOTIQ()
    robotiq.enable()
    robotiq.set_mode(1)

    # Load model
    pc_models = []
    centroid_models = []
    grasping_poses = []
    files = os.listdir('./models/pipe/pc/')
    for i, file in enumerate(files):
        filename, ext = os.path.splitext(file)

        pc_model = load_ply(path='./models/pipe/pc/' + file, num_points=-1)
        centroid, grasping_pose = np.load('./models/pipe/info/' + filename +
                                          '.npy')

        pc_models.append(pc_model)
        centroid_models.append(centroid)
        grasping_poses.append(grasping_pose)
    '''
    Record
    '''
    done = False
    while not done:
        # Grab the current frames first so rgb/dmap are defined before
        # the keystroke handlers below use them.
        rgb, dmap = display_stream(rgb_stream=rgb_stream,
                                   depth_stream=depth_stream,
                                   h=h,
                                   w=w,
                                   crop=((w1, h1), (w2, h2)))
        key = cv2.waitKey(1) & 255
        ## Read keystrokes
        if key == 27:  # terminate
            print("\tESC key detected!")
            done = True
        elif chr(key) == 's':  # screen capture
            '''
            Get data
            '''
            # sTime = time.time()
            # xyzrgbrc1 = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, scale=1000)
            # xyzrgbrc1 = xyzrgbrc1[h1:h2, w1:w2, :] # crop the interested area
            # print(time.time() - sTime)

            sTime = time.time()
            xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb[h1:h2, w1:w2, :],
                                     depth=dmap[h1:h2, w1:w2],
                                     depthStream=depth_stream,
                                     scale=1000)
            # xyzrgbrc = rgbd2xyzrgbrc(rgb=rgb, depth=dmap, depthStream=depth_stream, scale=1000)
            print('Converting time from rgb-d to xyz: ', time.time() - sTime)

            sTime = time.time()
            pc_scene = xyzrgb2pc(xyzrgb=xyzrgbrc)
            print('Converting time from xyz to point cloud: ',
                  time.time() - sTime)

            pipe = []
            for i in range(2):
                pc_scene = sample_data(point_cloud=pc_scene,
                                       num_points=num_points)
                '''
                Predict and Segment objects
                '''
                pred_labels = predict_segmentation(classifier=classifier,
                                                   input_points=pc_scene)
                # visualize(x=pc_scene[:, 0], y=pc_scene[:, 1], z=pc_scene[:, 2], label=pred_labels,
                #           point_radius=0.0008)  # visualize predicted results
                pipe_points, wrench_points = visualize_segmented_objects(
                    scene_points=pc_scene,
                    labels=pred_labels)  # visualize segmented objects
                if len(pipe_points) > 0:
                    pipe.append(pipe_points)
            if len(pipe) == 0: continue  # nothing segmented in either pass
            pipe = np.concatenate(pipe, axis=0)
            visualize(x=pipe[:, 0],
                      y=pipe[:, 1],
                      z=pipe[:, 2],
                      label=np.ones(len(pipe)),
                      point_radius=0.0008)

            if len(pipe) < 100: continue
            result = []
            # for i in range(len(centroid_models)):
            for i in range(1):
                i = 10  # only model 10 is tried; restore the loop above to search all models
                print('Attempt fitting model: ', i)
                pc_model = pc_models[i]
                centroid_model = centroid_models[i]
                '''
                Surface-Matching
                '''
                # pc_scene = wrench_points
                # visualize(x=pc_model[:, 0], y=pc_model[:, 1], z=pc_model[:, 2], label=np.ones(len(pc_model)), point_radius=0.0008)
                # visualize(x=pipe[:, 0], y=pipe[:, 1], z=pipe[:, 2], label=np.ones(len(pipe)), point_radius=0.0008)

                result_icp, result_ransac = match_surface(
                    model_points=pc_model, object_points=pipe)

                # if (result==[]) or (result_ransac.fitness > result[0].fitness):
                if (result == []) or (length_vector(
                        result_icp.transformation[:3, 3]) < result[4]):
                    result = [
                        result_ransac, result_icp, centroid_model, i,
                        length_vector(result_icp.transformation[:3, 3])
                    ]

            if result == []: continue
            # Transformation (rotation angles)
            print('Best fitness: ')
            print(result[0].fitness)
            print('Theta x, Theta y, Theta z (in degrees):')
            print(
                rotationMatrixToEulerAngles(result[1].transformation[:3, :3]) /
                np.pi * 180)
            print('Best fitted model: ', result[3], ' Model centroid: ',
                  result[2] * 1000)
            print('Translation (in millimeters): ',
                  result[1].transformation[:3, 3] * 1000)
            source = PointCloud()
            source.points = Vector3dVector(pc_models[result[3]])
            target = PointCloud()
            target.points = Vector3dVector(pipe)
            # draw_registration_result(source, target, result[1].transformation)

            translation = result[1].transformation[:2, 3] * 1000  # x, y components in millimeters
            translation = translation[::-1]  # reverse to (y, x) order
            grasping_pose = grasping_poses[result[3]][0, :2] + translation
            if grasping_pose[0] > 370: continue
            if grasping_pose[1] > 370: continue
            # grasping_pose = grasping_poses[result[3]][0, :3].astype(np.float64)
            rotate_angle = rotationMatrixToEulerAngles(
                result[1].transformation[:3, :3]) / np.pi * 180
            tx60.enable()
            tx60.move_dimension(z=60)
            tx60.move_dimension(rz=rotate_angle[2])
            tx60.move_point(x=grasping_pose[0],
                            y=grasping_pose[1],
                            z=60,
                            rx=-180,
                            ry=0,
                            rz=rotate_angle[2])
            tx60.move_dimension(z=-20)
            robotiq.move(200)
            tx60.move_dimension(z=60)
            tx60.move_point(x=-400, y=40, z=60, rx=-180, ry=0, rz=0)
            tx60.move_dimension(z=10)
            robotiq.move(40)
            tx60.disable()

    # end while

    ## Release resources
    robotiq.disable()
    tx60.disable()
    cv2.destroyAllWindows()
    rgb_stream.stop()
    depth_stream.stop()
    openni2.unload()
    print("Terminated")
Example #5
def main(argv=None):
    print('Hello! This is PointNet-Segmentation and Surface-Matching Program')

    opt = parser.parse_args()
    num_points = opt.np
    pretrained_model = opt.ptn
    idx = opt.idx
    root = 'E:/PROJECTS/NTUT/PointNet/pointnet1_pytorch/DATA/Shapenet/shapenetcore_partanno_segmentation_benchmark_v0'
    num_classes = 3
    '''
    Load PointNet Model (model for point-wise classification)
    '''
    classifier = PointNetPointWise(num_points=num_points,
                                   num_classes=num_classes)
    classifier.load_state_dict(torch.load(pretrained_model))
    classifier.eval()
    '''
    Load data
    '''
    # data_tes = PartDataset(root=root, num_points=num_points, categories=['tools'], training=True, balanced=True,
    #                        shuffle=True, seed=0, offset=0)
    #
    # points, labels = data_tes[idx] # Load data points and ground truth labels
    # visualize(x=points[:, 0], y=points[:, 1], z=points[:, 2], label=labels, point_radius=0.0008) # visualize ground truth

    pc_scene = load_ply(path='./models/model5/PC/pipe/1001.ply',
                        num_points=4096)
    pc_scene = pc_scene.astype(np.float32)
    '''
    Predict and Segment objects
    '''
    pred_labels = predict_segmentation(classifier=classifier,
                                       input_points=pc_scene)
    visualize(x=pc_scene[:, 0],
              y=pc_scene[:, 1],
              z=pc_scene[:, 2],
              label=pred_labels,
              point_radius=0.0008)  # visualize predicted results

    pipe_points, wrench_points = visualize_segmented_objects(
        scene_points=pc_scene, labels=pred_labels)
    '''
    Surface-Matching
    '''
    # Convert numpy array to point cloud type
    pc_model = load_ply(path='./models/model5/PC/pipe/1002.ply', num_points=-1)

    # pc_scene = wrench_points
    visualize(x=pc_model[:, 0],
              y=pc_model[:, 1],
              z=pc_model[:, 2],
              label=np.ones(len(pc_model)),
              point_radius=0.0008)
    visualize(x=pipe_points[:, 0],
              y=pipe_points[:, 1],
              z=pipe_points[:, 2],
              label=np.ones(len(pipe_points)),
              point_radius=0.0008)

    # result_icp = match_surface(source_points=pc_model, target_points=pipe_points); print(result_icp.transformation[:3, 3])
    result_icp = match_surface(source_points=pc_model, target_points=pc_scene)
    print(result_icp.transformation[:3, 3])

    # Transformation (rotation angles)
    print('Theta x, Theta y, Theta z (in degrees):')
    print(
        rotationMatrixToEulerAngles(result_icp.transformation[:3, :3]) /
        np.pi * 180)
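
predict_segmentation wraps the classifier's forward pass and is called the same way in every example. For the PyTorch PointNetPointWise model used here, a minimal sketch could look as follows; the (1, 3, N) input layout and the per-point logits output shape are assumptions, since PointNet implementations vary (some also return the learned transform matrices).

import numpy as np
import torch

def predict_segmentation(classifier, input_points):
    # One scene of shape (N, 3) in, one integer label per point out.
    pts = torch.from_numpy(np.asarray(input_points, dtype=np.float32))
    pts = pts.t().unsqueeze(0)  # (1, 3, N), a common PointNet layout
    with torch.no_grad():
        logits = classifier(pts)  # assumed shape (1, N, num_classes)
    return logits.squeeze(0).argmax(dim=-1).cpu().numpy()
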