import logging

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image

# Project-local imports; the module paths below assume the GR-ConvNet
# robotic-grasping project layout and may need adjusting.
from hardware.device import get_device
from inference.post_process import post_process_output
from utils.data.camera_data import CameraData
from utils.visualisation.plot import plot_results, save_results


def grasp():
    # Parse command-line options (network, n_grasps, save, ...); parse_args()
    # is the argument parser sketched further down in this file.
    args = parse_args()
    # Load image
    logging.info('Loading image...')
    pic = Image.open('rgb.png', 'r')
    rgb = np.array(pic)
    pic = Image.open('depth.png', 'r')
    depth = np.expand_dims(np.array(pic), axis=2)  # add a channel axis so CameraData receives HxWx1 depth

    # Load Network
    logging.info('Loading model...')
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(False)

    img_data = CameraData(include_depth=True, include_rgb=True)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(12, 3))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=args.n_grasps,
                              grasp_width_img=width_img)
            fig.savefig('img_result.png')
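

# grasp() and run() below rely on a parse_args() helper that is not shown in
# these fragments. The sketch below is a minimal stand-in: only the options
# actually referenced in this file are defined, and the flag names and defaults
# are assumptions.
def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description='Predict grasps with a trained GR-ConvNet model')
    parser.add_argument('--network', type=str, required=True,
                        help='Path to the trained network file')
    parser.add_argument('--use-depth', type=int, default=1,
                        help='Use the depth image (1/0)')
    parser.add_argument('--use-rgb', type=int, default=1,
                        help='Use the RGB image (1/0)')
    parser.add_argument('--n-grasps', type=int, default=1,
                        help='Number of grasps to detect per image')
    parser.add_argument('--save', type=int, default=0,
                        help='Save result images instead of plotting them')
    parser.add_argument('--force-cpu', action='store_true',
                        help='Force inference on the CPU even if a GPU is available')
    return parser.parse_args()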


def predict_grasp_angle(network, rgb_path, depth_path):
    """Predict grasps for a single RGB-D image pair and return the detected grasp objects.

    Example (paths from the original author's setup):
        predict_grasp_angle(
            "trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch16/epoch_30_iou_0.97",
            "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/color8.jpeg",
            "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/depth8.jpeg")
    """
    # Fixed options (equivalent to the command-line arguments used by grasp() above)
    use_depth = True
    use_rgb = True
    n_grasps = 1
    save = False
    force_cpu = False

    # Load image
    logging.info('Loading image...')
    pic = Image.open(rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(depth_path, 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(force_cpu)

    img_data = CameraData(include_depth=use_depth, include_rgb=use_rgb)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(pred['pos'], pred['cos'], pred['sin'], pred['width'])
        #print(pred['pos'].size())
        #print(pred['pos'])
        #print(pred['cos'])
        #print(pred['sin'])
        #print(pred['width'])
        if save:
            save_results(
                rgb_img=img_data.get_rgb(rgb, False),
                depth_img=np.squeeze(img_data.get_depth(depth)),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=n_grasps,
                grasp_width_img=width_img
            )
            gs = []  # no grasp objects are produced on this path, so an empty list is returned
        else:
            fig = plt.figure(figsize=(10, 10))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=n_grasps,
                              grasp_width_img=width_img)
            fig.savefig('img_result.pdf')
            for g in gs:
                print(g.center)
                print(g.angle)
                print(g.length)
                print(g.width)

    return gs

#predict_grasp_angle("trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch16/epoch_notbest_17_iou_0.00", "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/color8.png", "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/depth8.png")
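
# Example consumer sketch (hypothetical helper, not part of the original code):
# pack a predicted grasp into a plain dict, e.g. to hand off to a robot-control
# or simulation layer. The attributes used are the ones printed above.
def grasp_to_dict(g):
    return {
        'center': tuple(g.center),  # centre pixel of the grasp rectangle
        'angle': g.angle,           # rotation of the grasp rectangle in the image plane
        'length': g.length,         # grasp rectangle length
        'width': g.width,           # grasp rectangle width
    }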


def run():
    # The original fragment below starts mid-function; the preamble (argument
    # parsing, image loading, network loading) is reconstructed from grasp()
    # above so the function is self-contained.
    args = parse_args()

    # Load image
    logging.info('Loading image...')
    pic = Image.open('rgb.png', 'r')
    rgb = np.array(pic)
    pic = Image.open('depth.png', 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(args.force_cpu)

    img_data = CameraData(include_depth=args.use_depth,
                          include_rgb=args.use_rgb)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(10, 10))
            plot_results(fig=fig,
                         rgb_img=img_data.get_rgb(rgb, False),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)
            fig.savefig('img_result.pdf')


# Example 4
def run_realtime():
    # Live-camera variant: the original fragment starts mid-function, so the
    # preamble below (argument parsing, camera setup, network loading) is
    # reconstructed. The camera wrapper import, class name and device_id value
    # are assumptions inferred from the get_image_bundle() calls in the loop.
    from hardware.camera import RealSenseCamera  # assumed project-local camera wrapper

    args = parse_args()

    # Connect to camera
    logging.info('Connecting to camera...')
    cam = RealSenseCamera(device_id=0)
    cam.connect()
    cam_data = CameraData(include_depth=args.use_depth, include_rgb=args.use_rgb)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(args.force_cpu)

    try:
        fig = plt.figure(figsize=(10, 10))
        while True:
            image_bundle = cam.get_image_bundle()
            rgb = image_bundle['rgb']
            depth = image_bundle['aligned_depth']
            x, depth_img, rgb_img = cam_data.get_data(rgb=rgb, depth=depth)
            with torch.no_grad():
                xc = x.to(device)
                pred = net.predict(xc)

                q_img, ang_img, width_img = post_process_output(
                    pred['pos'], pred['cos'], pred['sin'], pred['width'])

                plot_results(fig=fig,
                             rgb_img=cam_data.get_rgb(rgb, False),
                             depth_img=np.squeeze(cam_data.get_depth(depth)),
                             grasp_q_img=q_img,
                             grasp_angle_img=ang_img,
                             no_grasps=args.n_grasps,
                             grasp_width_img=width_img)
    finally:
        # Save the last prediction when the loop is interrupted (e.g. Ctrl-C)
        save_results(rgb_img=cam_data.get_rgb(rgb, False),
                     depth_img=np.squeeze(cam_data.get_depth(depth)),
                     grasp_q_img=q_img,
                     grasp_angle_img=ang_img,
                     no_grasps=args.n_grasps,
                     grasp_width_img=width_img)
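

if __name__ == '__main__':
    # Entry-point sketch (an assumption, not part of the original fragments):
    # configure logging and run the offline pipeline defined in grasp().
    logging.basicConfig(level=logging.INFO)
    grasp()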