def grasp():
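    # NOTE: `args` (network path, save flag, grasp count) is expected to come
    # from a module-level parse_args() that is not shown in this snippet.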
    # Load image
    logging.info('Loading image...')
    pic = Image.open('rgb.png', 'r')
    rgb = np.array(pic)
    pic = Image.open('depth.png', 'r')
    depth = np.expand_dims(np.array(pic), axis=2)  # add a channel axis: H x W x 1

    # Load Network
    logging.info('Loading model...')
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(False)

    img_data = CameraData(include_depth=True, include_rgb=True)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)
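    # get_data crops and normalises the RGB-D pair and stacks it into the
    # network input tensor x; the processed single images are returned too.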

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

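        # Decode the four network heads into grasp quality, angle (recovered
        # from the sin/cos maps) and gripper width images.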
        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(12, 3))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=args.n_grasps,
                              grasp_width_img=width_img)
            fig.savefig('img_result.png')
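
# The helpers used above (get_device, CameraData, post_process_output,
# plot_results, save_results) come from the surrounding codebase. A minimal
# import block, assuming the GR-ConvNet robotic-grasping repository layout
# (module paths may differ in your checkout), would normally sit at the top
# of the file:
import logging

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image

from hardware.device import get_device
from inference.post_process import post_process_output
from utils.data.camera_data import CameraData
from utils.visualisation.plot import plot_results, save_results
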
def predict_grasp_angle(network, rgb_path, depth_path):
    """Predict grasp rectangles for a single RGB-D image pair.

    network:    path to a trained model file (loaded via torch.load)
    rgb_path:   path to the RGB image
    depth_path: path to the matching depth image
    """
    use_depth = True
    use_rgb = True
    n_grasps = 1
    save = False
    force_cpu = False

    # Load image
    logging.info('Loading image...')
    pic = Image.open(rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(depth_path, 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(force_cpu)

    img_data = CameraData(include_depth=use_depth, include_rgb=use_rgb)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(pred['pos'], pred['cos'], pred['sin'], pred['width'])
        if save:
            save_results(
                rgb_img=img_data.get_rgb(rgb, False),
                depth_img=np.squeeze(img_data.get_depth(depth)),
                grasp_q_img=q_img,
                grasp_angle_img=ang_img,
                no_grasps=n_grasps,
                grasp_width_img=width_img
            )
            gs = []  # save_results returns no grasp objects; keeps `return gs` valid
        else:
            fig = plt.figure(figsize=(10, 10))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=n_grasps,
                              grasp_width_img=width_img)
            fig.savefig('img_result.pdf')
            for g in gs:
                print(g.center)
                print(g.angle)
                print(g.length)
                print(g.width)

    return gs

#predict_grasp_angle("trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch16/epoch_notbest_17_iou_0.00", "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/color8.png", "C:/Users/yashs/OneDrive/Desktop/PS simulation/rgbd_images/depth8.png")

def main():
    args = parse_args()  # argument parser defined elsewhere in the original file

    # Load image
    logging.info('Loading image...')
    pic = Image.open(args.rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(args.depth_path, 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(args.network)
    logging.info('Done')

    # Get the compute device
    device = get_device(args.force_cpu)

    img_data = CameraData(include_depth=args.use_depth,
                          include_rgb=args.use_rgb)

    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)

Example #4

class GraspGenerator:
    def __init__(self, saved_model_path, cam_id, visualize=False):
        self.saved_model_path = saved_model_path
        self.camera = RealSenseCamera(device_id=cam_id)

        self.model = None
        self.device = None

        self.cam_data = CameraData(include_depth=True, include_rgb=True)

        # Connect to camera
        self.camera.connect()

        # Load camera pose and depth scale (from running calibration)
        self.cam_pose = np.loadtxt('saved_data/camera_pose.txt', delimiter=' ')
        self.cam_depth_scale = np.loadtxt('saved_data/camera_depth_scale.txt',
                                          delimiter=' ')

        homedir = os.path.join(os.path.expanduser('~'), "grasp-comms")
        self.grasp_request = os.path.join(homedir, "grasp_request.npy")
        self.grasp_available = os.path.join(homedir, "grasp_available.npy")
        self.grasp_pose = os.path.join(homedir, "grasp_pose.npy")

        if visualize:
            self.fig = plt.figure(figsize=(10, 10))
        else:
            self.fig = None

    def load_model(self):
        print('Loading model... ')
        self.model = torch.load(self.saved_model_path)
        # Get the compute device
        self.device = get_device(force_cpu=False)

    def generate(self):
        # Get RGB-D image from camera
        image_bundle = self.camera.get_image_bundle()
        rgb = image_bundle['rgb']
        depth = image_bundle['aligned_depth']
        x, depth_img, rgb_img = self.cam_data.get_data(rgb=rgb, depth=depth)

        # Predict the grasp pose using the saved model
        with torch.no_grad():
            xc = x.to(self.device)
            pred = self.model.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])
        grasps = detect_grasps(q_img, ang_img, width_img)
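        # detect_grasps picks local maxima of the quality map and turns them
        # into grasp rectangles (centre, angle, length, width).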

        # Get grasp position from model output: read the depth at the grasp
        # centre (in full-image coordinates) and apply a fixed 4 cm offset
        pos_z = depth[grasps[0].center[0] + self.cam_data.top_left[0],
                      grasps[0].center[1] +
                      self.cam_data.top_left[1]] * self.cam_depth_scale - 0.04
        if pos_z == 0:
            # No valid depth reading at the grasp centre
            return

        # Back-project the pixel to camera coordinates with the pinhole model:
        # x = (u - ppx) * z / fx, y = (v - ppy) * z / fy
        pos_x = np.multiply(
            grasps[0].center[1] + self.cam_data.top_left[1] -
            self.camera.intrinsics.ppx, pos_z / self.camera.intrinsics.fx)
        pos_y = np.multiply(
            grasps[0].center[0] + self.cam_data.top_left[0] -
            self.camera.intrinsics.ppy, pos_z / self.camera.intrinsics.fy)

        target = np.asarray([pos_x, pos_y, pos_z])
        target.shape = (3, 1)
        print('target: ', target)

        # Convert camera to robot coordinates
        camera2robot = self.cam_pose
        target_position = np.dot(camera2robot[0:3, 0:3],
                                 target) + camera2robot[0:3, 3:]
        target_position = target_position[0:3, 0]

        # Convert camera to robot angle
        angle = np.asarray([0, 0, grasps[0].angle])
        angle.shape = (3, 1)
        target_angle = np.dot(camera2robot[0:3, 0:3], angle)

        # Concatenate grasp pose with grasp angle
        grasp_pose = np.append(target_position, target_angle[2])

        print('grasp_pose: ', grasp_pose)

        np.save(self.grasp_pose, grasp_pose)

        if self.fig:
            plot_grasp(fig=self.fig,
                       rgb_img=self.cam_data.get_rgb(rgb, False),
                       grasps=grasps,
                       save=True)

    def run(self):
        while True:
            if np.load(self.grasp_request):
                self.generate()
                np.save(self.grasp_request, 0)
                np.save(self.grasp_available, 1)
            else:
                time.sleep(0.1)
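
# Hypothetical robot-side counterpart to GraspGenerator.run(): a sketch that
# assumes the same grasp-comms file protocol set up in __init__ above.
import os
import time

import numpy as np


def request_grasp_pose(poll_interval=0.1):
    homedir = os.path.join(os.path.expanduser('~'), 'grasp-comms')
    grasp_request = os.path.join(homedir, 'grasp_request.npy')
    grasp_available = os.path.join(homedir, 'grasp_available.npy')
    grasp_pose = os.path.join(homedir, 'grasp_pose.npy')

    np.save(grasp_request, 1)            # ask the generator for a new grasp
    while not np.load(grasp_available):  # wait until generate() has written one
        time.sleep(poll_interval)
    np.save(grasp_available, 0)          # consume the availability flag
    return np.load(grasp_pose)           # [x, y, z, rotation about z]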