import logging

import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image

# get_device, CameraData, post_process_output, plot_results and save_results are helper
# utilities from this codebase; import them from wherever they live in your project layout.


def grasp():
    # Load image
    logging.info('Loading image...')
    pic = Image.open('rgb.png', 'r')
    rgb = np.array(pic)
    pic = Image.open('depth.png', 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network (mapped to the CPU; note that if get_device() later picks a GPU,
    # the model and the input tensor will end up on different devices)
    logging.info('Loading model...')
    net = torch.load(args.network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device (force_cpu=False: use a GPU if one is available)
    device = get_device(False)

    # Pre-process the RGB-D pair into a network input tensor
    img_data = CameraData(include_depth=1, include_rgb=1)
    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    # Run inference and turn the raw outputs into quality, angle and width maps
    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if args.save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=args.n_grasps,
                         grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(12, 3))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=args.n_grasps,
                              grasp_width_img=width_img)  # returned grasps are unused here
            fig.savefig('img_result.png')
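# --- Hedged sketch: the command-line arguments that grasp() assumes ---
# grasp() reads args.network, args.n_grasps and args.save, and the surrounding script also
# uses args.use_depth, args.use_rgb and args.force_cpu (plus args.vis, args.jacquard_output
# and args.iou_eval in the evaluation fragment further below). The exact flag names in the
# original script may differ; this parse_args() is illustrative only.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='Predict grasps for an RGB-D image')
    parser.add_argument('--network', type=str, required=True,
                        help='Path to the trained network checkpoint')
    parser.add_argument('--use-depth', type=int, default=1,
                        help='Use the depth channel (1/0)')
    parser.add_argument('--use-rgb', type=int, default=1,
                        help='Use the RGB channels (1/0)')
    parser.add_argument('--n-grasps', type=int, default=1,
                        help='Number of grasps to detect per image')
    parser.add_argument('--save', type=int, default=0,
                        help='Save the result images instead of plotting them')
    parser.add_argument('--force-cpu', action='store_true',
                        help='Force inference on the CPU even if a GPU is available')
    return parser.parse_args()


args = parse_args()  # the original script presumably builds `args` like this at module level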
def predict_grasp_angle(network, rgb_path, depth_path):
    # Fixed inference settings: use both modalities, detect a single grasp,
    # plot (rather than save) the result, and let get_device() pick the hardware.
    use_depth = 1
    use_rgb = 1
    n_grasps = 1
    save = 0
    force_cpu = False

    # Load image
    logging.info('Loading image...')
    pic = Image.open(rgb_path, 'r')
    rgb = np.array(pic)
    pic = Image.open(depth_path, 'r')
    depth = np.expand_dims(np.array(pic), axis=2)

    # Load Network
    logging.info('Loading model...')
    net = torch.load(network, map_location=torch.device('cpu'))
    logging.info('Done')

    # Get the compute device
    device = get_device(force_cpu)

    # Pre-process the RGB-D pair into a network input tensor
    img_data = CameraData(include_depth=use_depth, include_rgb=use_rgb)
    x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

    with torch.no_grad():
        xc = x.to(device)
        pred = net.predict(xc)

        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        if save:
            save_results(rgb_img=img_data.get_rgb(rgb, False),
                         depth_img=np.squeeze(img_data.get_depth(depth)),
                         grasp_q_img=q_img,
                         grasp_angle_img=ang_img,
                         no_grasps=n_grasps,
                         grasp_width_img=width_img)
        else:
            fig = plt.figure(figsize=(10, 10))
            gs = plot_results(fig=fig,
                              rgb_img=img_data.get_rgb(rgb, False),
                              grasp_q_img=q_img,
                              grasp_angle_img=ang_img,
                              no_grasps=n_grasps,
                              grasp_width_img=width_img)
            fig.savefig('img_result.pdf')

    # Print and return the detected grasps. Note that gs is only assigned in the plotting
    # branch; save is hard-coded to 0 above, so that branch is always taken.
    for g in gs:
        print(g.center)
        print(g.angle)
        print(g.length)
        print(g.width)
    return gs
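# --- Hedged usage sketch for predict_grasp_angle() ---
# The checkpoint and image paths are placeholders standing in for the Cornell-trained
# GR-ConvNet checkpoint and local RGB/depth files used in the original commented-out call;
# each returned grasp exposes the center, angle, length and width attributes printed above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    grasps = predict_grasp_angle(
        'trained-models/cornell-randsplit-rgbd-grconvnet3-drop1-ch16/epoch_30_iou_0.97',
        'rgbd_images/color8.png',
        'rgbd_images/depth8.png')
    if grasps:
        g = grasps[0]
        print('First detected grasp: center={}, angle={}, length={}, width={}'.format(
            g.center, g.angle, g.length, g.width))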
# --- Inside the per-image evaluation loop ---

# Optionally export the detected grasps in Jacquard-dataset format
if args.jacquard_output:
    grasps = grasp.detect_grasps(q_img, ang_img, width_img=width_img, no_grasps=1)
    with open(jo_fn, 'a') as f:
        for g in grasps:
            f.write(test_data.dataset.get_jname(didx) + '\n')
            f.write(g.to_jacquard(scale=1024 / 300) + '\n')

# Optionally save a visualisation of the predictions for this image
if args.vis:
    save_results(
        rgb_img=test_data.dataset.get_rgb(didx, rot, zoom, normalise=False),
        depth_img=test_data.dataset.get_depth(didx, rot, zoom),
        grasp_q_img=q_img,
        grasp_angle_img=ang_img,
        no_grasps=args.n_grasps,
        grasp_width_img=width_img)

# --- After the evaluation loop: report timing and IOU statistics ---
avg_time = (time.time() - start_time) / len(test_data)
logging.info('Average evaluation time per image: {}ms'.format(avg_time * 1000))

if args.iou_eval:
    logging.info('IOU Results: %d/%d = %f' % (results['correct'],
                                              results['correct'] + results['failed'],
                                              results['correct'] / (results['correct'] + results['failed'])))
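# --- Hedged sketch: bookkeeping behind results['correct'] / results['failed'] ---
# The IOU summary above only reads these counters; how they are filled is not shown in this
# fragment. A minimal illustration (update_iou_results and iou_summary are hypothetical
# helpers, not part of the original evaluation code):
def update_iou_results(results, grasp_matched):
    """Count one evaluated image as a success or a failure."""
    results['correct' if grasp_matched else 'failed'] += 1


def iou_summary(results):
    """Return (correct, total, accuracy), guarding against an empty result set."""
    total = results['correct'] + results['failed']
    return results['correct'], total, (results['correct'] / total if total else 0.0)


# e.g. results = {'correct': 0, 'failed': 0}, then update_iou_results(results, iou_match)
# once per evaluated image.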
device = get_device(args.force_cpu)

img_data = CameraData(include_depth=args.use_depth, include_rgb=args.use_rgb)
x, depth_img, rgb_img = img_data.get_data(rgb=rgb, depth=depth)

with torch.no_grad():
    xc = x.to(device)
    pred = net.predict(xc)

    q_img, ang_img, width_img = post_process_output(
        pred['pos'], pred['cos'], pred['sin'], pred['width'])

    if args.save:
        save_results(rgb_img=img_data.get_rgb(rgb, False),
                     depth_img=np.squeeze(img_data.get_depth(depth)),
                     grasp_q_img=q_img,
                     grasp_angle_img=ang_img,
                     no_grasps=args.n_grasps,
                     grasp_width_img=width_img)
    else:
        fig = plt.figure(figsize=(10, 10))
        plot_results(fig=fig,
                     rgb_img=img_data.get_rgb(rgb, False),
                     grasp_q_img=q_img,
                     grasp_angle_img=ang_img,
                     no_grasps=args.n_grasps,
                     grasp_width_img=width_img)
        fig.savefig('img_result.pdf')
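# --- Hedged sketch: extracting grasp rectangles instead of only writing a figure ---
# Mirrors the grasp.detect_grasps(...) call from the Jacquard-export fragment above and the
# attributes printed by predict_grasp_angle(); assumes the same `grasp` helper module is
# importable at this point.
detected = grasp.detect_grasps(q_img, ang_img, width_img=width_img, no_grasps=args.n_grasps)
for i, g in enumerate(detected):
    logging.info('Grasp %d: center=%s, angle=%s, length=%s, width=%s',
                 i, g.center, g.angle, g.length, g.width)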