def __init__(self, prediction_model):
    """Set up grasp/shift inference, indexers and the pose converter from Config,
    and warm up the given prediction network.
    """
    self.grasp_model = Config.grasp_model
    self.shift_model = Config.shift_model

    # Models trained with a type head expose an extra 'type' output layer.
    self.with_types = 'types' in self.grasp_model[1]
    self.output_layer = ['prob', 'type'] if self.with_types else 'prob'

    # Grasp inference with input uncertainty enabled.
    self.grasp_inference = InferencePlanarPose(
        Loader.get_model(self.grasp_model, output_layer=self.output_layer),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        with_types=self.with_types,
        input_uncertainty=True,
    )
    self.grasp_inference.keep_indixes = [0, 1]

    # Shift inference uses a wider angle space and a larger crop.
    self.shift_inference = InferencePlanarPose(
        Loader.get_model(self.shift_model, output_layer='prob'),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        with_types=False,
    )
    self.shift_inference.a_space = np.linspace(-3.0, 3.0, 26)  # [rad] # Don't use a=0.0
    self.shift_inference.size_original_cropped = (240, 240)

    # Indexers map inference output channels to concrete gripper / shift actions.
    self.grasp_indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    self.shift_indexer = ShiftIndexer(shift_distance=Config.shift_distance)
    self.grasp_shift_indexer = GraspShiftIndexer(
        gripper_classes=Config.gripper_classes,
        shift_distance=Config.shift_distance,
    )
    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, shift_z_offset=0.007, box=Config.box)  # [m]

    self.prediction_model = prediction_model
    self.monte_carlo = 20
    self.actions_since_last_inference = 0
    self.actions: List[Action] = []
    self.output_path = Path.home() / 'Desktop'
    self.reinfer_next_time = True

    # Warm up the prediction network — the first inference call is slower.
    warmup_shapes = [(1, 64, 64, 1), (1, 1, 1, 1), (1, 1, 1), (1, 1, 1, 8)]
    self.prediction_model.predict([np.zeros(shape) for shape in warmup_shapes])
def test_agent_predict(self):
    """Smoke test: run the prediction agent's action proposal on one recorded image (GPU only)."""
    # Other candidate episodes: 2019-03-11-14-56-07-284, 2019-03-14-11-26-17-352, 2019-03-12-16-14-54-658
    _, image = Loader.get_action('cylinder-cube-1', '2019-03-11-14-56-07-284', 'ed-v')

    if TEST_WITH_GPU:
        # The generator needs its custom one-hot layer to deserialize.
        prediction_model = Loader.get_model('cylinder-cube-1', 'predict-generator-3', custom_objects={'_one_hot': one_hot_gen(4)})
        grasp_model = Loader.get_model('cylinder-cube-1', 'model-6-arch-more-layer', output_layer='prob')
        shift_model = Loader.get_model('shifting', 'model-1', output_layer='prob')

        agent = PredictAgent(prediction_model, grasp_model, shift_model)
        # Only checks that prediction runs without raising; no asserted output.
        agent.predict_actions([image], SelectionMethod.Top5, N=5, verbose=True)
def __init__(self, **params):
    """Set up grasp inference from the configured model.

    `**params` is currently unused but kept for interface compatibility.
    """
    self.model = Config.grasp_model

    # Watch the model file on disk so a retrained model can be hot-reloaded.
    self.watch_for_model_modification = True
    self.model_last_modified = Loader.get_model_path(self.model).stat().st_mtime

    # Monte-Carlo dropout sampling only for 'mc' models; 'types' models have an extra type output head.
    self.monte_carlo = 40 if 'mc' in self.model[1] else None
    self.with_types = 'types' in self.model[1]
    self.output_layer = 'prob' if not self.with_types else ['prob', 'type']

    self.inference = InferencePlanarPose(
        model=Loader.get_model(self.model, output_layer=self.output_layer),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
        monte_carlo=self.monte_carlo,
        with_types=self.with_types,
    )
    # NOTE(review): attribute name spelled 'indixes' in InferencePlanarPose — keep as-is.
    self.inference.keep_indixes = None
    self.indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)  # [m]

    # Alternative indexer/converter setups kept for reference:
    # # self.indexer = GraspFinalDIndexer(gripper_classes=Config.gripper_classes, final_d_classes=[0.0, 0.035])
    # self.indexer = LateralIndexer(
    #     angles=[(0, 0), (0.3, 0)],
    #     gripper_classes=[0.05, 0.07, 0.084],
    # )
    # self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)

    self.reinfer_next_time = True  # Always true in contrast to AgentPredict
def infer(self, images: List[OrthographicImage], method: SelectionMethod) -> Action:
    """Infer the next action for the given images.

    Returns a 'bin_empty' action when the estimated reward falls below the
    configured threshold for high-expectation selection methods; otherwise
    returns the inferred action with its pose calculated.
    """
    if self.monte_carlo:  # Adapt monte carlo progress parameter s
        # Linearly ramp s from 0 to 1 between 3500 and 4500 episodes, clamped to [0, 1].
        epoch_in_database = Loader.get_episode_count(Config.grasp_database)
        s_not_bounded = (epoch_in_database - 3500) * 1 / (4500 - 3500)
        self.inference.current_s = max(min(s_not_bounded, 1.0), 0.0)

    # Hot-reload the model if its file changed on disk since the last load.
    current_model_st_mtime = Loader.get_model_path(self.model).stat().st_mtime
    if self.watch_for_model_modification and current_model_st_mtime > self.model_last_modified + 0.5:  # [s]
        logger.warning(f'Reload model {self.model}.')
        try:
            self.inference.model = Loader.get_model(self.model, output_layer=self.output_layer)
            self.model_last_modified = Loader.get_model_path(self.model).stat().st_mtime
        except OSError:
            # Fix: corrected typo in log message ('probabily' -> 'probably').
            logger.info('Could not load model, probably file locked.')

    # Third image is assumed to be a color image in BGR order — convert in place.
    if len(images) == 3:
        images[2].mat = images[2].mat[:, :, ::-1]  # BGR to RGB

    action = self.inference.infer(images, method)
    self.indexer.to_action(action)

    # Declare the bin empty only for selection methods that expect high rewards.
    estimated_reward_lower_than_threshold = action.estimated_reward < Config.bin_empty_at_max_probability
    bin_empty = estimated_reward_lower_than_threshold and Epoch.selection_method_should_be_high(method)

    if bin_empty:
        return Action('bin_empty', safe=1)

    self.converter.calculate_pose(action, images)
    return action
def test_heatmap(self):
    """Render a grasp-probability heatmap for one recorded image and save it (GPU only)."""
    _, image = Loader.get_action('cylinder-cube-1', '2019-03-26-09-51-08-827', 'ed-v')

    if TEST_WITH_GPU:
        model = Loader.get_model('cylinder-cube-1', 'model-6-arch-more-layer', output_layer='prob')
        heatmapper = Heatmap(InferencePlanarPose, model, box=self.box)
        heatmap = heatmapper.render(image)
        # Fix: removed needless f-string prefix on a literal without placeholders (ruff F541).
        imageio.imwrite(str(self.file_path / 'gen-heatmap.png'), heatmap)
def check_for_model_reload(self):
    """Reload the inference model if its file on disk changed since the last load.

    A 0.5 s grace window avoids reloading a file that is still being written.
    Failures to read the (possibly locked) file are logged and ignored.
    """
    current_model_st_mtime = Loader.get_model_path(self.model).stat().st_mtime
    if self.watch_for_model_modification and current_model_st_mtime > self.model_last_modified + 0.5:  # [s]
        logger.warning(f'Reload model {self.model}.')
        try:
            self.inference.model = Loader.get_model(self.model, output_layer=self.output_layer)
            self.model_last_modified = Loader.get_model_path(self.model).stat().st_mtime
        except OSError:
            # Fix: corrected typo in log message ('probabily' -> 'probably').
            logger.info('Could not load model, probably file locked.')
def __init__(self):
    """ROS node setup: read parameters, build inference pipeline, advertise services, and spin."""
    # Parameters
    self.grasp_model = rospy.get_param('graspro/grasp_model', 'graspro-v1')
    self.gripper_classes = rospy.get_param('graspro/gripper_classes')
    self.z_offset = rospy.get_param('graspro/z_offset', 0.0)
    self.ensenso_depth = rospy.get_param('graspro/camera/ensenso_depth')
    self.realsense_depth = rospy.get_param('graspro/camera/realsense_depth')
    self.realsense_color = rospy.get_param('graspro/camera/realsense_color')
    self.lower_random_pose = rospy.get_param('graspro/lower_random_pose', [-0.1, -0.1, 0.0])
    self.upper_random_pose = rospy.get_param('graspro/upper_random_pose', [0.1, 0.1, 0.0])
    self.box_center = rospy.get_param('graspro/bin_center', [0, 0, 0])
    self.box_size = rospy.get_param('graspro/bin_size', False)
    self.box = {'center': self.box_center, 'size': self.box_size}
    self.publish_heatmap = rospy.get_param('graspro/publish_heatmap', False)

    # Inference
    self.inference = PlanarInference(
        model=Loader.get_model(self.grasp_model, output_layer='prob'),
        box=self.box,
        lower_random_pose=self.lower_random_pose,
        upper_random_pose=self.upper_random_pose,
    )
    self.indexer = GraspIndexer(gripper_classes=self.gripper_classes)
    self.converter = Converter(grasp_z_offset=self.z_offset, box=self.box)  # [m]

    if self.publish_heatmap:
        self.heatmapper = Heatmap(self.inference, self.inference.model, box=self.box)
        # Fix: rospy.Publisher requires the message data class as second argument
        # (and queue_size to avoid the deprecation warning). The heatmap is an image,
        # so publish it as sensor_msgs Image like image_publisher below.
        # NOTE(review): confirm Image is the intended message type for this topic.
        self.heatmap_publisher = rospy.Publisher('graspro/heatmap', Image, queue_size=10)

    self.bridge = CvBridge()
    self.image_publisher = rospy.Publisher('graspro/pose_on_image', Image, queue_size=10)

    s1 = rospy.Service('graspro/infer_grasp', InferGrasp, self.infer_grasp)
    s2 = rospy.Service('graspro/estimate_reward_for_grasp', EstimateRewardForGrasp, self.estimate_reward_for_grasp)

    # Blocks until the node is shut down.
    rospy.spin()
def __init__(self):
    """Set up grasp inference/indexing; shift inference only when Config.shift_objects is enabled."""
    self.grasp_inference = InferencePlanarPose(
        model=Loader.get_model(Config.grasp_model, output_layer='prob'),
        box=Config.box,
        lower_random_pose=Config.lower_random_pose,
        upper_random_pose=Config.upper_random_pose,
    )
    self.grasp_indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    self.converter = Converter(grasp_z_offset=Config.grasp_z_offset, shift_z_offset=0.007, box=Config.box)  # [m]

    if Config.shift_objects:
        self.shift_inference = InferencePlanarPose(
            model=Loader.get_model(Config.shift_model, output_layer='prob'),
            box=Config.box,
            lower_random_pose=Config.lower_random_pose,
            upper_random_pose=Config.upper_random_pose,
        )
        # Shift model uses a wider angle space and a larger crop than grasping.
        self.shift_inference.a_space = np.linspace(-3.0, 3.0, 26)  # [rad] # Don't use a=0.0
        self.shift_inference.size_original_cropped = (240, 240)
        self.shift_indexer = ShiftIndexer(shift_distance=Config.shift_distance)

    self.reinfer_next_time = True  # Always true in contrast to AgentPredict
from actions.indexer import GraspIndexer
from config import Config
from data.loader import Loader
from inference.planar import InferencePlanarPose
from picking.image import draw_pose
from picking.param import SelectionMethod

if __name__ == '__main__':
    # Alternative: Monte-Carlo model with 160 samples.
    # inference = InferencePlanarPose(
    #     Loader.get_model('cylinder-cube-mc-1', 'model-1-mc', output_layer='prob'),
    #     box=Config.box,
    #     monte_carlo=160
    # )
    inference = InferencePlanarPose(
        Loader.get_model('cylinder-cube-1', 'model-6-arch-more-layer', output_layer='prob'),
        box=Config.box,
    )
    # Alternative: shifting model.
    # inference = InferencePlanarPose(
    #     Loader.get_model('shifting', 'model-3'),
    #     box=Config.box,
    # )

    _, image = Loader.get_action('cylinder-cube-mc-1', '2019-07-02-13-27-22-246', 'ed-v')

    indexer = GraspIndexer(gripper_classes=Config.gripper_classes)
    # NOTE(review): Converter is not imported in the visible import block above —
    # confirm the import exists elsewhere in this file.
    converter = Converter(grasp_z_offset=Config.grasp_z_offset, box=Config.box)
import argparse  # Fix: argparse was used below but never imported (NameError at runtime).
import time

from config import Config
from data.loader import Loader

if __name__ == '__main__':
    # Print a Keras model summary; for 'placing' models also print the sub-model
    # summaries and their dropout rates.
    parser = argparse.ArgumentParser(description='Print summary of saved model.')
    parser.add_argument('-m', '--model', dest='model', type=str, required=True)
    parser.add_argument('--line-length', dest='line_length', type=int, default=140)
    args = parser.parse_args()

    model = Loader.get_model(args.model)
    model.summary(line_length=args.line_length)

    if 'placing' in args.model:
        model.get_layer('grasp').summary(line_length=args.line_length)
        model.get_layer('place').summary(line_length=args.line_length)
        model.get_layer('merge').summary(line_length=args.line_length)

        # Layer names are fixed by the training script — these lookups only
        # exist for 'placing' models.
        print('Grasp Dropout', model.get_layer('grasp').get_layer('dropout_6').rate)
        print('Place Dropout', model.get_layer('place').get_layer('dropout_21').rate)
        print('Merge Dropout', model.get_layer('merge').get_layer('dropout_24').rate)
from agents.agent_predict import Agent
from data.loader import Loader
from learning.utils.layers import one_hot_gen
from picking.param import SelectionMethod

# Candidate episodes:
# few objects (7-8): 2019-03-11-14-56-07-284
# many objects: 2019-07-01-14-03-11-150
# three cubes in a row: 2019-08-23-10-52-33-384
_, image = Loader.get_action('cylinder-cube-1', '2019-08-23-10-52-33-384', 'ed-v')

pred_model = Loader.get_model('cube-1', 'predict-bi-gen-5')
agent = Agent(pred_model)

# Alternative planning settings kept for reference:
# agent.plan_actions([image], SelectionMethod.Top5, depth=7, leaves=2, verbose=False)
# agent.plan_actions([image], SelectionMethod.Max, depth=7, leaves=1, verbose=False)
agent.plan_actions([image], SelectionMethod.Top5, depth=5, leaves=3, verbose=True)
import numpy as np from tensorflow.keras import Model # pylint: disable=E0401 from config import Config from data.loader import Loader from utils.heatmap import Heatmap if __name__ == '__main__': save_path = Path(__file__).parent.parent.parent / 'test' / 'generated' collection = 'placing-3' episode_id = '2020-02-04-00-34-54-455' # episode_id = '2020-02-04-00-18-07-322' model = Loader.get_model('placing-3-32-part-type-2') action_grasp, image_grasp = Loader.get_action(collection, episode_id, 0, 'ed-v') action_place, image_place, image_goal = Loader.get_action( collection, episode_id, 1, ['ed-v', 'ed-goal']) g = model.get_layer('grasp') p = model.get_layer('place') grasp_output = ['reward_grasp', 'z_m0'] place_output = ['reward_place', 'z_p'] grasp_model = Model(inputs=g.input, outputs=[g.get_layer(l).output for l in grasp_output]) place_model = Model(inputs=p.input,
import cv2
import numpy as np
from tensorflow.keras import Model  # pylint: disable=E0401

from actions.action import Affine, RobotPose
from config import Config
from data.loader import Loader
from utils.image import draw_around_box, get_area_of_interest_new

if __name__ == '__main__':
    # NOTE(review): Path is used below but not imported in the visible import block —
    # confirm `from pathlib import Path` exists elsewhere in this file.
    save_path = Path(__file__).parent.parent.parent / 'test' / 'generated'

    collection = 'placing-3'
    episode_id = '2020-01-30-11-30-51-981'

    combined_model = Loader.get_model('placing-3-21-part-type-2')

    # Action 0 is the grasp, action 1 the place (with an additional goal image).
    action_grasp, image_grasp = Loader.get_action(collection, episode_id, 0, 'ed-v')
    action_place, image_place, image_goal = Loader.get_action(collection, episode_id, 1, ['ed-v', 'ed-goal'])

    draw_around_box(image_grasp, box=Config.box)
    draw_around_box(image_place, box=Config.box)
    draw_around_box(image_goal, box=Config.box)

    pose_grasp = action_grasp.pose
    # pose_grasp = RobotPose(Affine(x=-0.0053, y=0.0414, a=1.4708))
    pose_place = action_place.pose
    # pose_place = RobotPose(Affine(x=-0.0025, y=0.0563, a=-1.4708))
uncertainty = np.nanvar(result, axis=0) uncertainty /= np.nanmax(uncertainty) * 0.25 uncertainty = np.clip(uncertainty * 255, 0, 255).astype(np.uint8) uncertainty = cv2.applyColorMap(uncertainty, cv2.COLORMAP_JET) cv2.imwrite(str(save_dir / f'{predictor.name}_unc.png'), uncertainty) # for i in range(3): # cv2.imwrite(str(save_dir / f'{predictor.name}_result_{i}.png'), result[i] * 255) # cv2.waitKey(1000) if __name__ == '__main__': predict_bicycle = PredictBicycle(Loader.get_model('cube-1', 'predict-bi-gen-5'), name='bi') predict_pix2pix = PredictPix2Pix(Loader.get_model('cube-1', 'predict-generator-4'), name='pix2pix') predict_vae = PredictVAE(Loader.get_model('cube-1', 'predict-vae-2'), name='vae') for p in [predict_bicycle, predict_pix2pix, predict_vae]: save_episode(p, 'cylinder-cube-1', '2019-03-11-14-56-07-284') save_episode(p, 'cylinder-cube-1', '2019-07-01-14-05-53-150') save_episode(p, 'cylinder-cube-1', '2019-07-01-13-50-06-016') save_episode(p, 'cylinder-cube-1', '2019-07-01-11-10-31-450', reward=0) save_episode(p, 'shifting', '2018-12-11-09-54-26-687',
def bin_picking():
    """Main bin-picking loop: move to the bin, take images, infer an action
    (grasp / shift / place), execute it, save the episode, and handle bin
    switching and periodic retraining.

    NOTE(review): this function was reconstructed from whitespace-mangled
    source; the block nesting below is a best-effort reconstruction — verify
    against the original file before relying on exact control flow.
    """
    # Alternative agents kept for reference:
    # agent = Agent()
    # agent.inference.current_type = 2
    # agent = AgentShift()
    agent = AgentPredict(prediction_model=Loader.get_model('cube-1', 'predict-bi-gen-5'))
    agent.grasp_inference.current_type = 2

    camera = Camera(camera_suffixes=Config.camera_suffixes)
    episode_history = EpisodeHistory()
    gripper = Gripper('172.16.0.2', Config.gripper_speed)
    robot = Robot('panda_arm', Config.general_dynamics_rel)
    saver = Saver(Config.database_url, Config.grasp_database)
    current_bin = Config.start_bin
    md = MotionData().with_dynamics(1.0)

    gripper.stop()
    robot.recover_from_errors()
    move_to_safety(robot, md)
    move_joints_successful = robot.move_joints(Frames.bin_joint_values[current_bin], md)

    # Retry once after opening the gripper and recovering.
    if not move_joints_successful:
        gripper.move(0.07)
        robot.recover_from_errors()
        move_to_safety(robot, md)
        move_joints_successful = robot.move_joints(Frames.bin_joint_values[current_bin], md)

    if Config.mode is Mode.Measure and not Config.home_gripper:
        logger.warning('Want to measure without homing gripper?')
    elif Config.mode is Mode.Measure and Config.home_gripper:
        # NOTE(review): ambiguous nesting in original — homing twice around the
        # safety move is the reconstruction used here; confirm.
        gripper.homing()
        move_to_safety(robot, md)
        gripper.homing()

    overall_start = time.time()

    for epoch in Config.epochs:
        while episode_history.total() < epoch.number_episodes:
            current_episode = Episode()
            current_selection_method = epoch.get_selection_method()
            if Config.mode in [Mode.Evaluate, Mode.Perform]:
                current_selection_method = epoch.get_selection_method_perform(episode_history.failed_grasps_since_last_success_in_bin(current_bin))
            start = time.time()

            # Take new overview images unless the agent can keep predicting on old ones.
            if (not Config.predict_images) or agent.reinfer_next_time:
                robot.recover_from_errors()
                robot.move_joints(Frames.bin_joint_values[current_bin], md)

                # NOTE(review): due to operator precedence this assigns
                # ((choice if lateral else 0), 0) — confirm overview_image_angles
                # holds scalar b-angles, not (b, c) pairs.
                b, c = random.choice(Config.overview_image_angles) if Config.lateral_overview_image else 0, 0
                camera_frame_overview = Frames.get_camera_frame(current_bin, b=b, c=c)
                if not Frames.is_camera_frame_safe(camera_frame_overview):
                    continue
                if Config.mode is Mode.Measure or Config.lateral_overview_image:
                    robot.move_cartesian(Frames.camera, camera_frame_overview, md)

                image_frame = robot.current_pose(Frames.camera)
                images = take_images(current_bin, camera, robot, image_frame=image_frame)

                if not Frames.is_gripper_frame_safe(robot.current_pose(Frames.gripper)):
                    logger.info('Image frame not safe!')
                    robot.recover_from_errors()
                    continue

            # Only the configured camera suffixes are fed into the model.
            input_images = list(filter(lambda i: i.camera in Config.model_input_suffixes, images))

            # if episode_history.data:
            #     agent.successful_grasp_before = episode_history.data[-1].actions[0].reward > 0

            action = agent.infer(input_images, current_selection_method)
            action.images = {}
            action.save = True

            if Config.mode is Mode.Measure:
                saver.save_image(images, current_episode.id, 'v', action=action)
            if Config.mode is not Mode.Perform:
                saver.save_action_plan(action, current_episode.id)

            logger.info(f'Action: {action} at time {time.time() - overall_start:0.1f}')

            action.reward = 0.0
            action.collision = False
            action.bin = current_bin
            if Config.set_zero_reward:
                action.safe = -1

            if action.type == 'bin_empty':
                action.save = False
            elif action.type == 'new_image':
                action.save = False
                agent.reinfer_next_time = True

            if action.safe == 0:
                logger.warning('Action ignored.')
                action.save = False
            else:
                # Optionally record lateral views around the planned action pose.
                if Config.mode is Mode.Measure and Config.take_lateral_images and action.save:
                    md_lateral = MotionData().with_dynamics(1.0)

                    for b, c in Config.lateral_images_angles:
                        lateral_frame = Frames.get_camera_frame(current_bin, a=action.pose.a, b=b, c=c, reference_pose=image_frame)

                        if not Frames.is_camera_frame_safe(lateral_frame) or (b == 0.0 and c == 0.0):
                            continue

                        lateral_move_succss = robot.move_cartesian(Frames.camera, lateral_frame, md_lateral)  # Remove a for global b, c pose
                        if lateral_move_succss:
                            saver.save_image(take_images(current_bin, camera, robot), current_episode.id, f'lateral_b{b:0.3f}_c{c:0.3f}'.replace('.', '_'), action=action)

                if action.safe == 1 and action.type not in ['bin_empty', 'new_image']:
                    # Transform the planned pose from image frame into robot frame.
                    action_frame = Frames.get_action_pose(action_pose=action.pose, image_pose=image_frame)

                    if Config.mode is Mode.Measure and Config.take_direct_images:
                        robot.move_cartesian(Frames.camera, Affine(z=0.308) * Affine(b=0.0, c=0.0) * action_frame, md)
                        saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'direct', action=action)

                    if action.type == 'grasp':
                        grasp(robot, gripper, current_episode, current_bin, action, action_frame, image_frame, camera, saver, md)
                    elif action.type == 'shift':
                        # Shift reward is the change of grasp reward around the action
                        # (placeholders here — both are 0.0 in this version).
                        old_reward_around_action = 0.0
                        shift(robot, gripper, current_episode, current_bin, action, action_frame, image_frame, camera, saver, md)
                        new_reward_around_action = 0.0
                        action.reward = new_reward_around_action - old_reward_around_action
                    elif action.type == 'place':
                        last_grasp = episode_history.data[-1].actions[0]
                        action.grasp_episode_id = episode_history.data[-1].id
                        place(robot, gripper, current_episode, current_bin, action, action_frame, last_grasp, image_frame, camera, saver, md)
                elif action.safe < 0:
                    logger.info('Pose not found.')
                    action.collision = True

                if Config.take_after_images:
                    robot.move_cartesian(Frames.camera, image_frame, md)
                    saver.save_image(take_images(current_bin, camera, robot), current_episode.id, 'after', action=action)

            action.execution_time = time.time() - start
            logger.info(f'Time for action: {action.execution_time:0.3f} [s]')

            if action.save:
                current_episode.actions.append(action)
                if Config.mode is Mode.Measure:
                    logger.info(f'Save episode {current_episode.id}.')
                    saver.save_episode(current_episode)
                episode_history.append(current_episode)

            logger.info(f'Episodes (reward / done / total): {episode_history.total_reward(action_type="grasp")} / {episode_history.total()} / {sum(e.number_episodes for e in Config.epochs)}')
            logger.info(f'Last success: {episode_history.failed_grasps_since_last_success_in_bin(current_bin)} cycles ago.')
            # episode_history.save_grasp_rate_prediction_step_evaluation(Config.evaluation_path)

            # Change bin
            should_change_bin_for_evaluation = (Config.mode is Mode.Evaluate and episode_history.successful_grasps_in_bin(current_bin) == Config.change_bin_at_number_of_success_grasps)
            should_change_bin = (Config.mode is not Mode.Evaluate and (episode_history.failed_grasps_since_last_success_in_bin(current_bin) >= Config.change_bin_at_number_of_failed_grasps or action.type == 'bin_empty'))
            if should_change_bin_for_evaluation or (Config.change_bins and should_change_bin):
                if Config.mode is Mode.Evaluate:
                    pass
                current_bin = Frames.get_next_bin(current_bin)
                agent.reinfer_next_time = True
                logger.info('Switch to other bin.')
                if Config.mode is not Mode.Perform and Config.home_gripper:
                    gripper.homing()

            # Retrain model
            if Config.train_model and episode_history.total() > 0 and not episode_history.total() % Config.train_model_every_number_cycles:
                logger.warning('Retrain model!')
                with open('/tmp/training.txt', 'wb') as out:
                    p = Popen([sys.executable, str(Config.train_script)], stdout=out)
                    if not Config.train_async:
                        p.communicate()

    logger.info('Finished cleanly.')
import os  # Fix: os.environ is used below but os was never imported (NameError at runtime).

import cv2
import numpy as np

from config import Config
from data.loader import Loader
from inference.planar import InferencePlanarPose
from picking.image import draw_pose

if __name__ == '__main__':
    # Pin TensorFlow to the second GPU.
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(1)

    np.set_printoptions(suppress=True)

    action, image = Loader.get_action('cylinder-cube-mc-1', '2019-07-02-19-55-54-845', 'ed-v')

    # Monte-Carlo dropout inference with 160 samples.
    inference = InferencePlanarPose(
        model=Loader.get_model('cylinder-cube-mc-1', 'model-1-mc'),
        box=Config.box,
        monte_carlo=160,
    )

    estimated_reward, estimated_reward_std = inference.infer_at_pose([image], action.pose)
    print(estimated_reward, estimated_reward_std)

    draw_pose(image, action.pose, convert_to_rgb=True)
    cv2.imshow('image', image.mat)
    cv2.waitKey(1500)