# Move the gripper to the target chessboard point so a human can verify the
# registration, then return the robot home and shut everything down.

# Fixed gripper orientation in the world frame: x aligned with world x,
# y and z flipped (gripper pointing straight down).
R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])
# Target translation = chessboard point plus per-axis offsets from config
# (compensates for the gripper geometry relative to the TCP).
t_gripper_world = np.array([target_pt_world.x + config['gripper_offset_x'],
                            target_pt_world.y + config['gripper_offset_y'],
                            target_pt_world.z + config['gripper_offset_z']])
T_gripper_world = RigidTransform(rotation=R_gripper_world,
                                 translation=t_gripper_world,
                                 from_frame='gripper',
                                 to_frame='cb')
logging.info('Moving robot to point x=%f, y=%f, z=%f' %(t_gripper_world[0],
                                                        t_gripper_world[1],
                                                        t_gripper_world[2]))
# Pure 5 cm vertical offset used to approach/retract above the target,
# avoiding dragging the gripper across the board.
T_lift = RigidTransform(translation=(0,0,0.05), from_frame='cb', to_frame='cb')
T_gripper_world_lift = T_lift * T_gripper_world

# Approach from above, then descend to the target point.
y.right.goto_pose(T_gripper_world_lift)
y.right.goto_pose(T_gripper_world)

# wait for human measurement (blocks until operator confirms)
yesno = raw_input('Take measurement. Hit [ENTER] when done')

# Retract vertically, then return to the original lifted pose
# (T_orig_gripper_world_lift is defined earlier in this script).
y.right.goto_pose(T_gripper_world_lift)
y.right.goto_pose(T_orig_gripper_world_lift)

# stop robot: go home, optionally reset the bin, then release interfaces
y.reset_home()
if 'reset_bin' in config.keys() and config['reset_bin']:
    y.reset_bin()
y.stop()
sensor.stop()
# Parse CLI args, load the YAML config, prepare the output directory, and
# start iterating over the configured sensors.
parser.add_argument('--config_filename', type=str,
                    default='cfg/tools/capture_test_images.yaml',
                    help='path to configuration file to use')
args = parser.parse_args()
config_filename = args.config_filename
output_dir = args.output_dir

# read config
config = YamlConfig(config_filename)
vis = config['vis']

# make output dir if needed
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# read rescale factor (optional; defaults to no rescaling)
rescale_factor = 1.0
if 'rescale_factor' in config.keys():
    rescale_factor = config['rescale_factor']

# read workspace bounds (optional axis-aligned box in the world frame)
workspace = None
if 'workspace' in config.keys():
    workspace = Box(np.array(config['workspace']['min_pt']),
                    np.array(config['workspace']['max_pt']),
                    frame='world')

# init ros node
rospy.init_node('capture_test_images') #NOTE: this is required by the camera sensor classes
Logger.reconfigure_root()

# FIX: .iteritems() is Python 2-only and raises AttributeError on Python 3;
# .items() iterates the same (name, config) pairs on both versions.
for sensor_name, sensor_config in config['sensors'].items():
    logger.info('Capturing images from sensor %s' %(sensor_name))
# (closes an add_argument(...) call that begins before this chunk)
)
args = parser.parse_args()
config_filename = args.config_filename
output_dir = args.output_dir

# read config
config = YamlConfig(config_filename)
vis = config["vis"]

# make output dir if needed
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# read rescale factor (optional; 1.0 means images are kept at native size)
rescale_factor = 1.0
if "rescale_factor" in config.keys():
    rescale_factor = config["rescale_factor"]

# read workspace bounds (optional axis-aligned box in the world frame)
workspace = None
if "workspace" in config.keys():
    workspace = Box(
        np.array(config["workspace"]["min_pt"]),
        np.array(config["workspace"]["max_pt"]),
        frame="world",
    )

# init ros node
rospy.init_node(
    "capture_test_images"
)  # NOTE: this is required by the camera sensor classes
def train_classifier(self, plot_classification=True):
    """Fit a regressor mapping (state, action) to push-outcome deviation.

    Loads logged transitions for the "PushInDir" operator, computes the
    Euclidean deviation between actual and expected next states, and fits
    either a random forest (default) or a Gaussian process to predict that
    deviation. Optionally renders a 2D heatmap of predicted deviation over
    the workspace together with the board pieces and training points.

    Parameters
    ----------
    plot_classification : bool
        If True, produce the matplotlib visualization (blocks on plt.show()).
    """
    #load data, train classifier. Try KLR
    operator_name = "PushInDir"
    # Toggle which model to fit; only one predictor's output is kept below.
    fit_gp = False
    fit_rfr = True
    fn = self.agent_cfg["data_path"] + operator_name + ".npy"
    # .all() unwraps the 0-d object array produced by np.save on a dict.
    data = np.load(fn, allow_pickle=True).all()
    actual_states = data["actual_next_states"]
    expected_states = data["expected_next_states"]
    state = data["states"]
    actions = data["actions"]
    # Model input: concatenated state and action features per sample.
    input_vec = np.hstack([state, actions])
    # Drop near-constant columns (std below threshold carries no signal).
    std_vec = np.std(input_vec, axis=0)
    std_thresh = 0.01
    varying_input_vec = input_vec[:, std_vec > std_thresh]
    only_color = False
    # NOTE(review): column reorder assumes exactly 3 varying features survive
    # the std filter — confirm against the logged data layout.
    varying_input_vec = varying_input_vec[:, [
        1, 0, 2
    ]]  #for plotting, will give worse results 2,3,4
    # X keeps the unscaled features for plotting coordinates later.
    X = varying_input_vec[:]
    if only_color:
        varying_input_vec = varying_input_vec[:, [2]]
    scaler = preprocessing.StandardScaler().fit(varying_input_vec)
    input_vec_scaled = scaler.transform(varying_input_vec)
    # Target: distance between actual and expected next state (first 3 dims).
    deviations = np.linalg.norm(actual_states[:, :3] - expected_states[:, :3],
                                axis=1)
    #kernel = C(1.0, (1e-3, 1e3)) * Matern(5,(1, 5), nu=2.5)
    kernel = C(1.0, (1e-3, 1e3)) * Matern(0.5, (0.5, 5), nu=2.5)
    #kernel = C(1.0, (1e-3, 1e3)) * RBF(1, (1e-3, 1e2))
    #[0 and 1 ] of the varying version will be the interesting part
    gp = GPR(kernel=kernel, n_restarts_optimizer=20)
    rfr = self.setup_random_forest()
    # NOTE(review): if both flags are False, deviations_pred (and y_pred
    # below) is never bound and the print/plot raises NameError.
    if fit_rfr:
        rfr.fit(input_vec_scaled, deviations)
        deviations_pred = rfr.predict(input_vec_scaled)
    if fit_gp:
        gp.fit(input_vec_scaled, deviations)
        deviations_pred, sigma = gp.predict(input_vec_scaled,
                                            return_std=True)
    #plot using 2D
    # Training-set residual norm (diagnostic only, not a held-out score).
    print("Error", np.linalg.norm(deviations_pred - deviations))
    # Input space
    if plot_classification:
        from autolab_core import YamlConfig
        import matplotlib.patches
        cfg = YamlConfig("cfg/franka_block_push_two_d.yaml")
        # Board pieces are the cfg entries named "boardpiece*".
        self._board_names = [
            name for name in cfg.keys() if "boardpiece" in name
        ]
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
        # Draw each board piece as an unfilled rectangle centered on its pose.
        for _board_name in self._board_names:
            color = cfg[_board_name]['rb_props']["color"]
            pose = [
                cfg[_board_name]["pose"]["x"],
                cfg[_board_name]["pose"]["y"]
            ]
            dims = [
                cfg[_board_name]["dims"]["depth"],
                cfg[_board_name]["dims"]["width"]
            ]
            #draw them.
            # Rectangle takes the lower-left corner; shift from the center.
            pose[0] -= (dims[0] / 2.)
            pose[1] -= (dims[1] / 2.)
            patch = matplotlib.patches.Rectangle(pose,
                                                 dims[0],
                                                 dims[1],
                                                 fill=False,
                                                 edgecolor=color + [
                                                     0.8,
                                                 ],
                                                 linewidth=20)
            ax.add_patch(patch)
        y = deviations
        # Evaluate the model on a num_pts x num_pts grid over the data range.
        num_pts = 50
        x1 = np.linspace(X[:, 0].min() - 0.05, X[:, 0].max() + 0.05,
                         num_pts)  # p
        x2 = np.linspace(X[:, 1].min() - 0.05, X[:, 1].max() + 0.05,
                         num_pts)  # q
        x = (np.array([x1, x2])).T
        x1x2 = np.array(list(product(x1, x2)))
        # Grid features: the 2D position plus the board color at that point.
        x1x2_incl_color = np.zeros((x1x2.shape[0], X.shape[1]))
        x1x2_incl_color[:, :2] = x1x2
        for i in range(x1x2.shape[0]):
            x1x2_incl_color[i, 2:] = color_block_is_on(
                cfg, np.array([x1x2[i, 1], 0,
                               x1x2[i, 0]]))[0]  #match the expected pose layout
        #get corresponding colors here
        if only_color:
            if fit_gp:
                y_pred, MSE = gp.predict(scaler.transform(
                    x1x2_incl_color[:, 1:2]).reshape(-1, 1),
                                         return_std=True)
        else:
            if fit_gp:
                y_pred, MSE = gp.predict(scaler.transform(x1x2_incl_color),
                                         return_std=True)
            if fit_rfr:
                y_pred = rfr.predict(scaler.transform(x1x2_incl_color))
        X0p, X1p = x1x2[:, 0].reshape(num_pts, num_pts), x1x2[:, 1].reshape(
            num_pts, num_pts)
        Zp = np.reshape(y_pred, (num_pts, num_pts))
        # alternative way to generate equivalent X0p, X1p, Zp
        # X0p, X1p = np.meshgrid(x1, x2)
        # Zp = [gp.predict([(X0p[i, j], X1p[i, j]) for i in range(X0p.shape[0])]) for j in range(X0p.shape[1])]
        # Zp = np.array(Zp).T
        block_shift = 0  #0.07/2
        # Heatmap of predicted deviation, overlaid with the training points
        # colored by their true deviation.
        ax.pcolormesh(X0p - block_shift,
                      X1p - block_shift,
                      Zp,
                      cmap="Greens",
                      vmin=-0.01)
        #ax.scatter(X0p, X1p, c=Zp, cmap="Greens", vmin=-0.1)
        plot = ax.scatter(X[:, 0] - block_shift,
                          X[:, 1] - block_shift,
                          c=deviations,
                          cmap="Greens",
                          vmin=-0.01,
                          edgecolor="k")
        #ax.scatter(X[:,0], X[:,1], c=deviations_pred, cmap="Greens", vmin=-0.05)
        # Axis limits are deliberately reversed (max first) to flip both axes.
        x_max = 0.3
        x_min = -0.3
        z_max = 0.6
        z_min = 0.1
        plt.xlim([x_max, x_min])
        plt.ylim([z_max, z_min])
        plt.colorbar(plot)
        #fig.colorbar(ax)
        plt.show()
# Parse CLI args, load the YAML config, create per-run and per-sensor output
# directories, and start the per-sensor capture loop.
args = parser.parse_args()
name = args.name
config_filename = args.config_filename

# read config
config = YamlConfig(config_filename)
output_dir = config['output_dir']
if not os.path.exists(output_dir):
    os.mkdir(output_dir)
# nest a per-run subdirectory named after this capture session
output_dir = os.path.join(output_dir, name)
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# read workspace bounds (optional axis-aligned box in the world frame)
workspace = None
if 'workspace' in config.keys():
    workspace = Box(np.array(config['workspace']['min_pt']),
                    np.array(config['workspace']['max_pt']),
                    frame='world')

# FIX: .iteritems() is Python 2-only and raises AttributeError on Python 3;
# .items() iterates the same (name, config) pairs on both versions.
for sensor_name, sensor_config in config['sensors'].items():
    print('Capturing images from sensor %s' %(sensor_name))
    save_dir = os.path.join(output_dir, sensor_name)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    # read params
    sensor_type = sensor_config['type']
    sensor_frame = sensor_config['frame']

    # read camera calib
os.makedirs(output_dir) pose_filename = os.path.join(output_dir, '%s_to_world.tf' % (sensor_frame)) T_camera_world.save(pose_filename) intr_filename = os.path.join(output_dir, '%s.intr' % (sensor_frame)) ir_intrinsics.save(intr_filename) f = open( os.path.join(output_dir, 'corners_cb_%s.npy' % (sensor_frame)), 'w') np.save(f, reg_result.cb_points_cam.data) # move the robot to the chessboard center for verification if config['use_robot']: # get robot type robot_type = 'yumi' if 'robot_type' in config.keys(): robot_type = config['robot_type'] # find the rightmost and further cb point in world frame cb_points_world = T_camera_world * reg_result.cb_points_cam cb_point_data_world = cb_points_world.data dir_world = np.array([-1.0, 1.0, 0]) dir_world = dir_world / np.linalg.norm(dir_world) ip = dir_world.dot(cb_point_data_world) # open interface to robot if robot_type == 'ur5': from ur_control import UniversalRobot, ToolState, T_KINEMATIC_AVOIDANCE_WORLD, KINEMATIC_AVOIDANCE_JOINTS robot = UniversalRobot() robot.reset_home() else: