def __init__(self, **params):
    """Build the grasp-inference pipeline from the globally configured model.

    Capabilities are derived from the model's name: a second name component
    containing 'mc' enables Monte-Carlo sampling, 'types' enables the extra
    type output head.
    """
    self.model = Config.grasp_model

    # Watch the model file on disk so a retrained model triggers re-inference.
    self.watch_for_model_modification = True
    self.model_last_modified = Loader.get_model_path(self.model).stat().st_mtime

    # Capabilities encoded in the model name (second component).
    self.monte_carlo = 40 if 'mc' in self.model[1] else None
    self.with_types = 'types' in self.model[1]
    self.output_layer = ['prob', 'type'] if self.with_types else 'prob'

    self.inference = InferencePlanarPose(
        model=Loader.get_model(self.model, output_layer=self.output_layer),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        monte_carlo=self.monte_carlo,
        with_types=self.with_types,
    )
    # [sic] attribute name ("indixes") is defined by InferencePlanarPose.
    self.inference.keep_indixes = None

    self.indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)

    self.reinfer_next_time = True  # Always true in contrast to AgentPredict
def test_shift_conversion(self):
    """Shift action passes the safety check and its computed z ends up below zero."""
    converter = Converter(shift_z_offset=0.0, box=self.box)

    image = OrthographicImage(
        self.read_image(self.file_path / self.image_name),
        2000.0,
        0.22,
        0.4,
        '',
        Config.default_image_pose,
    )

    action = Action()
    action.type = 'shift'
    action.pose = RobotPose()
    action.pose.x = -0.02
    action.pose.y = 0.105
    action.pose.a = 1.52
    action.pose.d = 0.078
    action.index = 1
    action.shift_motion = [0.03, 0.0]

    # Write an annotated image so the pose can be inspected by eye.
    draw_pose(image, action.pose, convert_to_rgb=True)
    draw_around_box(image, box=self.box, draw_lines=True)
    imageio.imwrite(str(self.file_path / 'gen-shift-draw-pose.png'), image.mat)

    self.assertTrue(converter.shift_check_safety(action, [image]))

    converter.calculate_pose(action, [image])
    self.assertLess(action.pose.z, 0.0)
def __init__(self, prediction_model):
    """Wire grasp and shift inference, their indexers, and pose conversion
    around the given prediction model, then warm the model up.
    """
    self.grasp_model = Config.grasp_model
    self.shift_model = Config.shift_model

    # The grasp model name's second component encodes the type-output head.
    self.with_types = 'types' in self.grasp_model[1]
    self.output_layer = ['prob', 'type'] if self.with_types else 'prob'

    self.grasp_inference = InferencePlanarPose(
        Loader.get_model(self.grasp_model, output_layer=self.output_layer),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        with_types=self.with_types,
        input_uncertainty=True,
    )
    # [sic] attribute name ("indixes") is defined by InferencePlanarPose.
    self.grasp_inference.keep_indixes = [0, 1]
    self.grasp_indexer = GraspIndexer(gripper_classes=Config.gripper_classes)

    self.shift_inference = InferencePlanarPose(
        Loader.get_model(self.shift_model, output_layer='prob'),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        with_types=False,
    )
    self.shift_inference.a_space = np.linspace(-3.0, 3.0, 26)  # [rad] # Don't use a=0.0
    self.shift_inference.size_original_cropped = (240, 240)
    self.shift_indexer = ShiftIndexer(shift_distance=Config.shift_distance)

    self.grasp_shift_indexer = GraspShiftIndexer(
        gripper_classes=Config.gripper_classes,
        shift_distance=Config.shift_distance,
    )

    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, shift_z_offset=0.007, box=Config.box)  # [m]

    self.prediction_model = prediction_model
    self.monte_carlo = 20

    self.actions_since_last_inference = 0
    self.actions: List[Action] = []
    self.output_path = Path.home() / 'Desktop'
    self.reinfer_next_time = True

    # First inference is slower — run a dummy predict once so later calls are fast.
    self.prediction_model.predict([
        np.zeros((1, 64, 64, 1)),
        np.zeros((1, 1, 1, 1)),
        np.zeros((1, 1, 1)),
        np.zeros((1, 1, 1, 8)),
    ])
def __init__(self):
    """ROS node entry: read parameters, build the inference pipeline,
    register the grasp services, and block in rospy.spin().
    """
    # Parameters
    self.grasp_model = rospy.get_param('graspro/grasp_model', 'graspro-v1')
    # Required parameter — no default, so a missing value raises at startup.
    self.gripper_classes = rospy.get_param('graspro/gripper_classes')
    self.z_offset = rospy.get_param('graspro/z_offset', 0.0)
    self.ensenso_depth = rospy.get_param('graspro/camera/ensenso_depth')
    self.realsense_depth = rospy.get_param(
        'graspro/camera/realsense_depth')
    self.realsense_color = rospy.get_param(
        'graspro/camera/realsense_color')
    self.lower_random_pose = rospy.get_param('graspro/lower_random_pose', [-0.1, -0.1, 0.0])
    self.upper_random_pose = rospy.get_param('graspro/upper_random_pose', [0.1, 0.1, 0.0])
    self.box_center = rospy.get_param('graspro/bin_center', [0, 0, 0])
    # Default False (not a size triple) signals "no box bounds configured".
    self.box_size = rospy.get_param('graspro/bin_size', False)
    self.box = {'center': self.box_center, 'size': self.box_size}
    self.publish_heatmap = rospy.get_param('graspro/publish_heatmap', False)

    # Inference
    self.inference = PlanarInference(
        model=Loader.get_model(self.grasp_model, output_layer='prob'),
        box=self.box,
        lower_random_pose=self.lower_random_pose,
        upper_random_pose=self.upper_random_pose,
    )
    self.indexer = GraspIndexer(gripper_classes=self.gripper_classes)
    self.converter = Converter(grasp_z_offset=self.z_offset, box=self.box)  # [m]

    if self.publish_heatmap:
        self.heatmapper = Heatmap(self.inference, self.inference.model, box=self.box)
        # NOTE(review): rospy.Publisher normally requires a message class (and
        # queue_size); this call passes only the topic name — confirm it works.
        self.heatmap_publisher = rospy.Publisher('graspro/heatmap')

    self.bridge = CvBridge()
    self.image_publisher = rospy.Publisher('graspro/pose_on_image', Image, queue_size=10)

    # Service handles are kept only so registration lives until spin().
    s1 = rospy.Service('graspro/infer_grasp', InferGrasp, self.infer_grasp)
    s2 = rospy.Service('graspro/estimate_reward_for_grasp', EstimateRewardForGrasp, self.estimate_reward_for_grasp)

    # Blocks until the node is shut down.
    rospy.spin()
def __init__(self):
    """Set up grasp inference (and optionally shift inference) from the global Config."""
    self.grasp_inference = InferencePlanarPose(
        model=Loader.get_model(Config.grasp_model, output_layer='prob'),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
    )
    self.grasp_indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, shift_z_offset=0.007, box=Config.box)  # [m]

    # Shift components are only built when shifting is enabled in the config.
    if Config.shift_objects:
        self.shift_inference = InferencePlanarPose(
            model=Loader.get_model(Config.shift_model, output_layer='prob'),
            box=Config.box,
            lower_random_pose=Config.lower_random_pose,
            upper_random_pose=Config.upper_random_pose,
        )
        self.shift_inference.a_space = np.linspace(-3.0, 3.0, 26)  # [rad] # Don't use a=0.0
        self.shift_inference.size_original_cropped = (240, 240)
        self.shift_indexer = ShiftIndexer(shift_distance=Config.shift_distance)

    self.reinfer_next_time = True  # Always true in contrast to AgentPredict
def test_grasp_conversion(self):
    """Grasp action passes the safety check and its computed z ends up below zero."""
    converter = Converter(grasp_z_offset=0.0, box=self.box)

    image = OrthographicImage(
        self.read_image(self.file_path / self.image_name),
        2000.0,
        0.22,
        0.4,
        '',
        Config.default_image_pose,
    )

    action = Action()
    action.type = 'grasp'
    action.pose.x = -0.06
    action.pose.y = 0.099
    action.pose.a = 0.523
    action.pose.d = 0.078
    action.index = 1

    # Write an annotated image so the pose can be inspected by eye.
    draw_pose(image, action.pose, convert_to_rgb=True)
    draw_around_box(image, box=self.box, draw_lines=True)
    imageio.imwrite(str(self.file_path / 'gen-grasp-draw-pose.png'), image.mat)

    self.assertTrue(converter.grasp_check_safety(action, [image]))

    converter.calculate_pose(action, [image])
    self.assertLess(action.pose.z, 0.0)
# NOTE(review): this chunk begins mid-call — the opening of the
# `InferencePlanarPose(` constructor lies outside this view; left untouched.
    Loader.get_model('cylinder-cube-1', 'model-6-arch-more-layer', output_layer='prob'),
    box=Config.box,
)
# inference = InferencePlanarPose(
#   Loader.get_model('shifting', 'model-3'),
#   box=Config.box,
# )

_, image = Loader.get_action('cylinder-cube-mc-1', '2019-07-02-13-27-22-246', 'ed-v')

indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)

# Time repeated inference runs; the first run is excluded below as warm-up.
times = []
for i in range(1):
    start = time.time()
    action = inference.infer([image], SelectionMethod.Top5, verbose=1)
    indexer.to_action(action)
    end = time.time()
    times.append(end - start)

converter.calculate_pose(action, [image])
print(action)

# NOTE(review): with range(1) the slice times[1:] is empty, so the mean is NaN
# (numpy warns on an empty mean) — raise the loop count to get a real timing.
print(
    f'Mean inference time [ms]: {(np.array(times[1:]).mean() * 1000):0.5f}'
)
# NOTE(review): `parser` is constructed outside this view; only the final two
# flags and the main display/convert logic are visible here.
parser.add_argument('--convert', action='store_true')
parser.add_argument('--show', action='store_true')
args = parser.parse_args()

# Load the recorded action and its image for the requested episode.
action, image = Loader.get_action(args.collection, args.episode, args.action, args.suffix)
print('Action: ', action)

if args.area:
    # Crop the image to the region around the action pose.
    area_image = get_area_of_interest_new(image, action.pose, size_cropped=(200, 200))

    if args.convert:
        converter = Converter(grasp_z_offset=0.015, box=Config.box)
        converter.grasp_convert(action, [image])

    if args.show:
        cv2.imshow('area_image', area_image.mat)
else:
    if args.draw:
        draw_around_box(image, box=Config.box)
        draw_pose(image, action.pose, convert_to_rgb=True)

    if args.show:
        cv2.imshow('image', image.mat)

# Keep any shown window open for 3 seconds.
if args.show:
    cv2.waitKey(3000)
def test_inside_box(self):
    """The pose of this recorded episode must be classified as outside the box."""
    action = Loader.get_action('cylinder-cube-1', '2019-06-25-14-59-51-451')
    checker = Converter(box=Config.box)
    self.assertFalse(checker.is_pose_inside_box(action.pose))