def __init__(self, disable_ft=False):
    self.urdf = URDF.from_parameter_server()
    self.overhead_vision_client = actionlib.SimpleActionClient(
        "recognize_objects", ObjectRecognitionAction)
    self.execute_trajectory_action = actionlib.SimpleActionClient(
        "execute_trajectory", ExecuteTrajectoryAction)
    self.rapid_node = rapid_node_pkg.RAPIDCommander()
    self.controller_commander = controller_commander_pkg.ControllerCommander()
    self.state = 'init'
    self.current_target = None
    self.current_payload = None
    self.available_payloads = {
        'leeward_mid_panel': 'leeward_mid_panel',
        'leeward_tip_panel': 'leeward_tip_panel'
    }
    self.desired_controller_mode = self.controller_commander.MODE_AUTO_TRAJECTORY
    self.speed_scalar = 1.0
    self.disable_ft = disable_ft
    self.tf_listener = PayloadTransformListener()
    self._process_state_pub = rospy.Publisher("process_state", ProcessState,
                                              queue_size=100, latch=True)
    self.publish_process_state()
    self.update_payload_pose_srv = rospy.ServiceProxy(
        "update_payload_pose", UpdatePayloadPose)
    self.get_payload_array_srv = rospy.ServiceProxy(
        "get_payload_array", GetPayloadArray)
    self.plan_dictionary = {}
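# publish_process_state() is called above but its body is not part of this
# excerpt. A minimal sketch, assuming it follows the same latched ProcessState
# pattern (string state/payload/target fields) used by the other scripts in
# this package:
#
#   def publish_process_state(self):
#       s = ProcessState()
#       s.state = self.state
#       s.payload = self.current_payload if self.current_payload is not None else ""
#       s.target = self.current_target if self.current_target is not None else ""
#       self._process_state_pub.publish(s)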
def __init__(self):
    # Subscribe to controller_state
    self.controller_state_sub = rospy.Subscriber(
        "controller_state", controllerstate, self.callback)
    self.last_ros_image_stamp = rospy.Time(0)
    self.goal_handle = None
    self.listener = PayloadTransformListener()
    self.rapid_node = rapid_node_pkg.RAPIDCommander()
    self.controller_commander = controller_commander_pkg.ControllerCommander()
    self.object_commander = ObjectRecognitionCommander()

    # Initialize aruco boards and parameters
    self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    self.parameters = cv2.aruco.DetectorParameters_create()
    self.parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    self.parameters.adaptiveThreshWinSizeMax = 30
    self.parameters.adaptiveThreshWinSizeStep = 7

    # ================== Cam 636
    # --- Subscribe to Gripper camera node for image acquisition
    # TODO: Take in camera names and make subscribers and triggers accordingly
    self.ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        self.object_commander.ros_raw_gripper_2_image_cb)
    self.ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/trigger', Trigger)

    # --- Camera parameters
    self.CamParam = CameraParams()

    # --- Camera pose
    # TODO: Substitute transform in here
    R_Jcam = np.array([[0.9995, -0.0187, -0.0263],
                       [-0.0191, -0.9997, -0.0135],
                       [-0.0261, 0.0140, -0.9996]])
    r_cam = rox.hat(np.array([0.0707, 1.1395, 0.2747]))
    self.R_Jcam = np.linalg.inv(np.vstack([
        np.hstack([R_Jcam, np.zeros([3, 3])]),
        np.hstack([np.dot(r_cam, R_Jcam), R_Jcam])
    ]))

    self.dt = None  # functions like a gain, used with velocity to track position
    self.iteration = 0
    self.board_ground = None
    self.FTdata = None
    self.ft_flag = False
    # self.FTdata_0 = self.FTdata
    # self.FTdata_0est = self.compute_ft_est()
    # self.result = self.take_image()

    # TODO: replace with trajopt code
    self.client = actionlib.SimpleActionClient("joint_trajectory_action",
                                               FollowJointTrajectoryAction)

    # IBVS parameters
    self.du_converge_TH = None
    self.dv_converge_TH = None
    self.iteration_limit = None
    self.Ki = None

    # Compliance controller parameters
    self.F_d_set1 = None
    self.F_d_set2 = None
    self.Kc = None

    self.ros_data = None
    self.camMatrix = None
    self.distCoeffs = None
    self.ros_gripper_2_cam_info_sub = rospy.Subscriber(
        '/gripper_camera_2/camera_info', CameraInfo, self.fill_camera_data)
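# A minimal standalone sketch of the 6x6 spatial-velocity (adjoint) transform
# assembled in __init__ above: the block matrix [[R, 0], [hat(p)R, R]] maps a
# twist between frames related by (R, p), and __init__ stores its inverse in
# self.R_Jcam. The import name for rox is an assumption; the original file's
# imports are not shown in this excerpt.
import numpy as np
import general_robotics_toolbox as rox  # assumed import for the rox module used above

def velocity_transform(R, p):
    """Return the 6x6 twist transform [[R, 0], [hat(p)*R, R]]."""
    return np.vstack([
        np.hstack([R, np.zeros((3, 3))]),
        np.hstack([np.dot(rox.hat(p), R), R])
    ])

# Usage mirroring __init__:
#   self.R_Jcam = np.linalg.inv(
#       velocity_transform(R_Jcam, np.array([0.0707, 1.1395, 0.2747])))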
def __init__(self, context):
    super(ExperimentGUI, self).__init__(context)
    self.plans = [
        'Starting Position', 'Above Panel', 'Panel Grabbed',
        'Above Placement Nest', 'Panel Placed'
    ]
    # state_dict ties each state to planListIndex values
    # self.state_dict={'reset_position':0,'pickup_prepare':1,'pickup_lower':2,
    #     'pickup_grab_first_step':2,'pickup_grab_second_step':2,'pickup_raise':2,
    #     'transport_panel':3,'place_lower':4,'place_set_first_step':4,
    #     'place_set_second_step':4,'place_raise':4}
    self.gui_execute_states = [
        "reset", "panel_pickup", "pickup_grab", "transport_panel",
        "place_panel"
    ]
    self.execute_states = [
        ['plan_to_reset_position', 'move_to_reset_position'],
        ['plan_pickup_prepare', 'move_pickup_prepare'],
        ['plan_pickup_lower', 'move_pickup_lower',
         'plan_pickup_grab_first_step', 'move_pickup_grab_first_step',
         'plan_pickup_grab_second_step', 'move_pickup_grab_second_step',
         'plan_pickup_raise', 'move_pickup_raise'],
        ['plan_transport_payload', 'move_transport_payload'],
        ['plan_place_set_second_step']
    ]
    self.teleop_modes = [
        'Error', 'Off', 'Joint', 'Cartesian', 'Cylindrical', 'Spherical'
    ]
    self.current_teleop_mode = 1
    self.teleop_button_string = "Tele-op\nMode:\n"

    # Give QObjects reasonable names
    self.setObjectName('MyPlugin')
    self._lock = threading.Lock()
    self.controller_commander = controller_commander_pkg.ControllerCommander()

    # Process standalone plugin command-line arguments
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument("-q", "--quiet", action="store_true", dest="quiet",
                        help="Put plugin in silent mode")
    args, unknowns = parser.parse_known_args(context.argv())
    if not args.quiet:
        print 'arguments: ', args
        print 'unknowns: ', unknowns

    # Create QWidget
    self.in_process = None
    self.recover_from_pause = False
    self._mainwidget = QWidget()
    self.layout = QGridLayout()
    self._mainwidget.setLayout(self.layout)
    self.disconnectreturnoption = False
    self.stackedWidget = QStackedWidget()
    self.layout.addWidget(self.stackedWidget, 0, 0)
    self._welcomescreen = QWidget()
    self._runscreen = QWidget()
    self._errordiagnosticscreen = QWidget()
    self.stackedWidget.addWidget(self._welcomescreen)
    self.stackedWidget.addWidget(self._runscreen)
    self.stackedWidget.addWidget(self._errordiagnosticscreen)
    self._proxy_model = message_proxy_model.MessageProxyModel()
    self._rospack = rospkg.RosPack()

    # Status LEDs, disabled so they are not clickable
    self.robotconnectionled = LEDIndicator()
    self.robotconnectionled.setDisabled(True)
    self.forcetorqueled = LEDIndicator()
    self.forcetorqueled.setDisabled(True)
    self.overheadcameraled = LEDIndicator()
    self.overheadcameraled.setDisabled(True)
    self.grippercameraled = LEDIndicator()
    self.grippercameraled.setDisabled(True)
    self.runscreenstatusled = LEDIndicator()
    self.runscreenstatusled.setDisabled(True)

    self.step_executor = GUI_Step_Executor()
    self.step_executor.error_signal.connect(self._feedback_receive)

    # Need this to pause motions
    self.process_client = actionlib.ActionClient('process_step',
                                                 ProcessStepAction)
    self.process_client.wait_for_server()

    self.placement_targets = {
        'leeward_mid_panel': 'panel_nest_leeward_mid_panel_target',
        'leeward_tip_panel': 'panel_nest_leeward_tip_panel_target'
    }
    self.placement_target = 'panel_nest_leeward_mid_panel_target'
    self.panel_type = 'leeward_mid_panel'
    self.client_handle = None

    # Light the camera LEDs according to which trigger services are available
    service_list = rosservice.get_service_list()
    if '/overhead_camera/trigger' in service_list:
        self.led_change(self.overheadcameraled, True)
    else:
        self.led_change(self.overheadcameraled, False)
    if '/gripper_camera_2/trigger' in service_list:
        self.led_change(self.grippercameraled, True)
    else:
        self.led_change(self.grippercameraled, False)

    self.mode = 0
    self.count = 0
    self.data_count = 0
    self.force_torque_data = np.zeros((6, 1))
    self.joint_angle_data = np.zeros((6, 1))

    # Get paths to the UI files, which should be in the "resource" folder of
    # this package
    self.welcomescreenui = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'resource', 'welcomeconnectionscreen.ui')
    self.runscreenui = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'resource', 'Runscreen.ui')
    self.errorscreenui = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'resource', 'errordiagnosticscreen.ui')
    self.rewound = False

    # Extend the widgets with all attributes and children from the UI files
    loadUi(self.welcomescreenui, self._welcomescreen)
    loadUi(self.runscreenui, self._runscreen)
    loadUi(self.errorscreenui, self._errordiagnosticscreen)

    # Give QObjects reasonable names
    self._mainwidget.setObjectName('MyPluginUi')

    # Show _widget.windowTitle on the top-left of each plugin (when it's set
    # in _widget). This is useful when you open multiple plugins at once.
    # Also, if you open multiple instances of your plugin at once, these
    # lines add a number to make it easy to tell one pane from another.
    if context.serial_number() > 1:
        self._mainwidget.setWindowTitle(self._mainwidget.windowTitle() +
                                        (' (%d)' % context.serial_number()))
    context.add_widget(self._mainwidget)
    self.context = context
    self.plugin_settings = None
    self.instance_settings = None

    self._welcomescreen.statusFormLayout.addWidget(self.robotconnectionled, 0, 0)
    self._welcomescreen.statusFormLayout.addWidget(self.forcetorqueled, 2, 0)
    self._welcomescreen.statusFormLayout.addWidget(self.overheadcameraled, 4, 0)
    self._welcomescreen.statusFormLayout.addWidget(self.grippercameraled, 6, 0)
    self._runscreen.connectionLayout.addWidget(self.runscreenstatusled, 0, 1)

    self.rviz_starter = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'src', 'rpi_arm_composites_manufacturing_gui', 'rviz_starter.py')

    # Populate the run-screen plan list
    for entry in self.plans:
        listentry = QListWidgetItem(entry)
        listentry.setFlags(Qt.ItemIsSelectable)
        self._runscreen.planList.addItem(listentry)
    self._runscreen.planList.item(0).setSelected(True)
    self.planListIndex = 0

    self._runscreen.panelType.setText(self.panel_type)
    self._runscreen.placementNestTarget.setText("Leeward Mid Panel Nest")
    self._runscreen.panelType.setReadOnly(True)
    self._runscreen.placementNestTarget.setReadOnly(True)
    self.commands_sent = False

    rospy.Subscriber("controller_state", controllerstate, self.callback)
    self._set_controller_mode = rospy.ServiceProxy("set_controller_mode",
                                                   SetControllerMode)
    rospy.Subscriber("GUI_state", ProcessState, self.process_state_set)

    self.force_torque_plot_widget = QWidget()
    self.joint_angle_plot_widget = QWidget()

    # Wire up screen navigation and command buttons
    self._welcomescreen.openConfig.clicked.connect(self._open_config_options)
    self._welcomescreen.toRunScreen.pressed.connect(self._to_run_screen)
    self._runscreen.backToWelcome.pressed.connect(self._to_welcome_screen)
    self._runscreen.nextPlan.pressed.connect(self._next_plan)
    self._runscreen.previousPlan.pressed.connect(self._previousPlan)
    self._runscreen.resetToHome.pressed.connect(self._reset_position)
    self._runscreen.stopPlan.pressed.connect(self._stopPlan)
    self._runscreen.accessTeleop.pressed.connect(self.change_teleop_modes)
    self._errordiagnosticscreen.openForceTorqueDataPlot.pressed.connect(
        self._open_force_torque_data_plot)
    self._errordiagnosticscreen.openJointAngleDataPlot.pressed.connect(
        self._open_joint_angle_data_plot)
    self._errordiagnosticscreen.backToRun.pressed.connect(self._to_run_screen)
def main():
    step_ts = 0.004
    rospy.init_node("test_moveit_commander_custom_trajectory", anonymous=True)
    rospy.Subscriber("controller_state", ControllerState, callback_function)

    robot = urdf.robot_from_parameter_server()
    controller_commander = controller_commander_pkg.ControllerCommander()
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    tran = np.array([2.197026484647054, 1.2179574262842452, 0.12376598588449844])
    rot = np.array([[-0.99804142, 0.00642963, 0.06222524],
                    [0.00583933, 0.99993626, -0.00966372],
                    [-0.06228341, -0.00928144, -0.99801535]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target2.p[2] += 0.20

    print 'Target:', pose_target2
    print "============ Move Close to Panel"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
    time.sleep(1.0)

    Kc = 0.004
    time_save = []
    FTdata_save = []
    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    time.sleep(1.0)

    # Baseline force/torque reading and its gravity-model estimate
    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(), np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3), np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]), np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)

    for i in range(400):
        tic = time.time()
        plan = RobotTrajectory()
        plan.joint_trajectory.joint_names = [
            'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
        ]
        current_joint_angles = controller_commander.get_current_joint_values()
        plan.joint_trajectory.header.frame_id = '/world'

        p1 = JointTrajectoryPoint()
        p1.positions = current_joint_angles
        p1.velocities = np.zeros((6,))
        p1.accelerations = np.zeros((6,))
        p1.time_from_start = rospy.Duration(0)

        # Recompute the gravity-model wrench estimate for the current pose
        T = listener.lookupTransform("base", "link_6", rospy.Time(0))
        rg = 9.8 * np.matmul(
            np.matmul(T.R, Tran_z).transpose(), np.array([0, 0, 1]).transpose())
        A1 = np.hstack([
            rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3),
            np.zeros([3, 3])
        ])
        A2 = np.hstack([
            np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]), np.eye(3)
        ])
        A = np.vstack([A1, A2])
        FTdata_est = np.matmul(A, Vec_wrench)

        # Gravity-compensated force/torque reading
        FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
        print 'FTread:', FTread
        print 'FT:', FTdata
        print 'Z', FTread[-1]

        if FTread[-1] > -200:
            F_d = -350
        else:
            F_d = -400

        # Force control law: command a z velocity proportional to the force
        # error and map it to joint velocities through the Jacobian
        J = rox.robotjacobian(robot, current_joint_angles)
        Vz = Kc * (F_d - FTread[-1])
        joints_vel = np.linalg.pinv(J).dot(np.array([0, 0, 0, 0, 0, Vz]))
        print 'Joint_command:', joints_vel.dot(step_ts)

        p2 = JointTrajectoryPoint()
        p2.positions = np.array(p1.positions) + joints_vel.dot(step_ts)
        p2.velocities = np.zeros((6,))
        p2.accelerations = np.zeros((6,))
        p2.time_from_start = rospy.Duration(step_ts)

        plan.joint_trajectory.points.append(p1)
        plan.joint_trajectory.points.append(p2)
        controller_commander.execute(plan)
        print 'Time:', time.time() - tic

        time_save.append(time.time())
        FTdata_save.append(FTread)

    with open("FTdata.txt", 'a') as f_handle:
        np.savetxt(f_handle, FTdata_save)
    with open("Time.txt", 'a') as f_handle:
        np.savetxt(f_handle, time_save)
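# A minimal sketch of the gravity-compensation step used in the loop above.
# The tool's static wrench is modeled as linear in an identified parameter
# vector (Vec_wrench); the 6x10 regressor A built from the gravity direction
# rg in the sensor frame predicts the force/torque reading, and the contact
# wrench is the measurement minus the predicted and initial offsets. The
# helper name and the rox import name are assumptions, not part of the
# original file.
import numpy as np
import general_robotics_toolbox as rox  # assumed import for the rox module used above

def ft_gravity_regressor(R_0T, Tran_z):
    """Build the 6x10 regressor A for the gravity vector expressed in the
    force/torque sensor frame (same construction as inline above)."""
    rg = 9.8 * np.matmul(np.matmul(R_0T, Tran_z).transpose(), np.array([0, 0, 1]))
    A1 = np.hstack([rox.hat(rg).transpose(), np.zeros([3, 1]),
                    np.eye(3), np.zeros([3, 3])])
    A2 = np.hstack([np.zeros([3, 3]), rg.reshape([3, 1]),
                    np.zeros([3, 3]), np.eye(3)])
    return np.vstack([A1, A2])

# Usage mirroring the loop body:
#   FTdata_est = ft_gravity_regressor(T.R, Tran_z).dot(Vec_wrench)
#   FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est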
def main():
    rospy.init_node("test_moveit_commander_custom_trajectory", anonymous=True)
    final_pos = None
    controller_commander = controller_commander_pkg.ControllerCommander()
    process_state_pub = rospy.Publisher("GUI_state", ProcessState,
                                        queue_size=100, latch=True)

    transport1 = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'src', 'rpi_arm_composites_manufacturing_gui', 'Joint_all_Panel1.txt')
    transport2 = os.path.join(
        rospkg.RosPack().get_path('rpi_arm_composites_manufacturing_gui'),
        'src', 'rpi_arm_composites_manufacturing_gui', 'Joint_all_Panel2.txt')

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.1, [], [])
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.5, [], [])

    if 'leeward_mid_panel' in sys.argv:
        waypoints = np.loadtxt(transport1, comments="#", delimiter=",",
                               unpack=False)
        panel_type = 'leeward_mid_panel'
        final_pos = np.array([
            0.5201124869846093235, 1.076912628142645101,
            -0.6322520329564644825, -0.008162611460859465345,
            1.131674733905407404, 0.4480798367216944356
        ])
    elif 'leeward_tip_panel' in sys.argv:
        waypoints = np.loadtxt(transport2, comments="#", delimiter=",",
                               unpack=False)
        panel_type = 'leeward_tip_panel'
        final_pos = np.array([
            -0.4619627594947815, 1.0956398248672485, -0.7398647665977478,
            0.01724303886294365, 1.1420093774795532, -0.49965518712997437
        ])
    else:
        raise Exception("Unknown panel type; pass 'leeward_mid_panel' or "
                        "'leeward_tip_panel' on the command line")

    # Short move from the current configuration to the first waypoint
    plan0 = RobotTrajectory()
    plan0.joint_trajectory.joint_names = [
        'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
    ]
    plan0.joint_trajectory.header.frame_id = '/world'
    p1 = JointTrajectoryPoint()
    p1.positions = controller_commander.get_current_joint_values()
    p1.velocities = np.zeros((6,))
    p1.accelerations = np.zeros((6,))
    p1.time_from_start = rospy.Duration(0)
    p2 = JointTrajectoryPoint()
    p2.positions = np.array(waypoints[0, :])
    p2.velocities = np.zeros((6,))
    p2.accelerations = np.zeros((6,))
    t2 = 3
    p2.time_from_start = rospy.Duration(t2)
    plan0.joint_trajectory.points.append(p1)
    plan0.joint_trajectory.points.append(p2)
    controller_commander.execute(plan0)

    # Replay the recorded waypoints at 10 ms spacing, then move to final_pos
    plan = RobotTrajectory()
    plan.joint_trajectory.joint_names = [
        'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
    ]
    plan.joint_trajectory.header.frame_id = '/world'
    p1 = JointTrajectoryPoint()
    p1.positions = controller_commander.get_current_joint_values()
    p1.velocities = np.zeros((6,))
    p1.accelerations = np.zeros((6,))
    p1.time_from_start = rospy.Duration(0)
    p2 = JointTrajectoryPoint()
    p2.positions = np.array(waypoints[0, :])
    p2.velocities = np.zeros((6,))
    p2.accelerations = np.zeros((6,))
    t2 = 0.5
    p2.time_from_start = rospy.Duration(t2)
    plan.joint_trajectory.points.append(p1)
    plan.joint_trajectory.points.append(p2)

    cnt = 0.0
    for w in waypoints:
        cnt = cnt + 1
        p3 = JointTrajectoryPoint()
        p3.positions = np.array(w)
        p3.velocities = np.zeros((6,))
        p3.accelerations = np.zeros((6,))
        p3.time_from_start = rospy.Duration(t2 + cnt * 0.01)
        print p3.time_from_start.to_sec()
        plan.joint_trajectory.points.append(p3)

    p3 = JointTrajectoryPoint()
    p3.positions = final_pos
    p3.velocities = np.zeros((6,))
    p3.accelerations = np.zeros((6,))
    p3.time_from_start = rospy.Duration(t2 + cnt * 0.01 + 3)
    plan.joint_trajectory.points.append(p3)
    controller_commander.execute(plan)

    # Publish process state to notify the GUI to free user inputs and alert
    # that the process is finished
    s = ProcessState()
    s.state = "move_transport_payload"
    s.payload = panel_type
    s.target = ""
    process_state_pub.publish(s)
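# The ProcessState message definition is not shown in this excerpt. From its
# use here and in the GUI (string-valued state, payload, and target fields,
# published latched on "GUI_state" / "process_state"), its definition is
# presumably equivalent to:
#
#   # ProcessState.msg (assumed layout)
#   string state
#   string payload
#   string target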
def main():
    # Start timer to measure execution time
    t1 = time.time()

    # Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)

    # Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    # Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    process_client = actionlib.SimpleActionClient('process_step',
                                                  ProcessStepAction)
    process_state_pub = rospy.Publisher("GUI_state", ProcessState,
                                        queue_size=100, latch=True)
    process_client.wait_for_server()

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()
    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    # Subscribe to gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/camera_trigger', Trigger)

    # Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, [], [])
    time.sleep(0.5)

    # Open-loop move to the initial pose: the location above the panel where
    # the end effector goes first (in the world/base frame), i.e. the ideal
    # location of the panel
    tran = np.array([2.15484, 1.21372, 0.25766])
    rot = rox.q2R([0.02110, -0.03317, 0.99922, -0.00468])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)

    # Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    # Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    parameters.adaptiveThreshWinSizeMax = 30
    parameters.adaptiveThreshWinSizeStep = 7

    # 1st Panel tag info
    board_ground = cv2.aruco.GridBoard_create(4, 4, .04, .0075, aruco_dict, 32)
    board_panel = cv2.aruco.GridBoard_create(8, 3, .025, .0075, aruco_dict, 80)

    # Load object points of the ground tag in the panel tag coordinate system
    # from mat files
    loaded_object_points_ground_in_panel_system_stage_1 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system_stage_2 = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['object_points_ground_in_panel_tag_system']
    loaded_object_points_ground_in_panel_system = \
        loaded_object_points_ground_in_panel_system_stage_1

    # Initialize camera intrinsic parameters
    # (18285636_10_05_2018_5_params, recalibrated 11/11/2018)
    CamParam = CameraParams(2283.391289766133, 1192.255485086403,
                            2279.484382094687, 1021.399092147012, 1.0,
                            -0.022101211408596, -0.095163053709332,
                            -0.003986991791212, -0.003479613658352,
                            0.179926705467534)

    UV = np.zeros([150, 8])
    P = np.zeros([150, 3])

    # Focal lengths in pixel units; these are the averaged f_hat values for
    # x and y
    f_hat_u = 2283.391289766133
    f_hat_v = 2279.484382094687

    # Functions like a gain, used with velocity to track position
    dt = 0.1
    du = 100.0
    dv = 100.0
    dutmp = 100.0
    dvtmp = 100.0
    du_array = []
    dv_array = []
    dx_array = []
    iteration = 0
    stage = 1
    Kc = 0.0002

    Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]])
    Vec_wrench = 100 * np.array([
        0.019296738361905, 0.056232033265447, 0.088644197659430,
        0.620524934626544, -0.517896661195076, 0.279323567303444,
        -0.059640563813256, 0.631460085138371, -0.151143175570223,
        -6.018321330845553
    ]).transpose()

    # Baseline force/torque reading and its gravity-model estimate
    FTdata_0 = FTdata
    T = listener.lookupTransform("base", "link_6", rospy.Time(0))
    rg = 9.8 * np.matmul(
        np.matmul(T.R, Tran_z).transpose(), np.array([0, 0, 1]).transpose())
    A1 = np.hstack([
        rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3), np.zeros([3, 3])
    ])
    A2 = np.hstack(
        [np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]), np.eye(3)])
    A = np.vstack([A1, A2])
    FTdata_0est = np.matmul(A, Vec_wrench)

    FTread_array = []
    FTdata_array = []
    t_now_array = []
    step_size = []

    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Image', 490, 410)

    ### PBVS set the initial pose
    #### Final Nest Placement Error Calculation ===============================
    # Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or \
            object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    # Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    # Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    # Remove ids and corresponding corners not in range (parasitic detections
    # in random locations in the image); the filter is disabled here
    # false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    # mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    # Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    # Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    # Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
    )['tvec_difference']
    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec difference: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec difference: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1] + 0.03,
                   tvec_err[2]]) * 0.75
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if res.error_code != 0:
        raise Exception("Trajectory execution returned error")
    print res
    print "End of Initial Pose ===================="

    ### End of initial pose
    step_size_min = 100000
    stage = 2
    loaded_object_points_ground_in_panel_system = \
        loaded_object_points_ground_in_panel_system_stage_2

    # Visual servoing loop; try changing the du and dv thresholds to lower
    # values (the number of iterations increases)
    while ((du > 4) | (dv > 4)) and (iteration < 55):
        iteration += 1
        t_now_array.append(time.time())

        # Go to stage 2 of movement (mostly downward z motion)
        if (du < 4) and (dv < 4) and (stage == 1):
            # Save pose of marker boards after the iterations end (while in
            # the final hovering position above the nest)
            # Get pose of both ground and panel markers from detected corners
            retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
                objPoints_ground, imgPoints_ground, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
            retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
                objPoints_panel, imgPoints_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
            print "============== Observed Pose difference in hovering position"
            observed_tvec_difference = tvec_ground - tvec_panel
            observed_rvec_difference = rvec_ground - rvec_panel
            print "tvec difference: ", observed_tvec_difference
            print "rvec difference: ", observed_rvec_difference

            # Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']
            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec difference: ", loaded_rvec_difference

            tvec_difference_Above_Nest = \
                loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = \
                loaded_rvec_difference - observed_rvec_difference
            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec difference: ", rvec_difference_Above_Nest

            # Saving pose information to file
            filename_pose1 = ("/home/rpi-cats/Desktop/DJ/Code/Data/"
                              "Panel1_Above_Nest_Pose_" + str(t1) + ".mat")
            scipy.io.savemat(
                filename_pose1,
                mdict={
                    'tvec_ground_Above_Nest': tvec_ground,
                    'tvec_panel_Above_Nest': tvec_panel,
                    'Rca_ground_Above_Nest': Rca_ground,
                    'Rca_panel_Above_Nest': Rca_panel,
                    'tvec_difference_Above_Nest': tvec_ground - tvec_panel,
                    'rvec_difference_Above_Nest': rvec_ground - rvec_panel,
                    'loaded_tvec_difference': loaded_tvec_difference,
                    'loaded_rvec_difference': loaded_rvec_difference,
                    'observed_tvec_difference': observed_tvec_difference,
                    'observed_rvec_difference': observed_rvec_difference
                })

            # raw_input("Confirm Stage 2")
            stage = 2
            loaded_object_points_ground_in_panel_system = \
                loaded_object_points_ground_in_panel_system_stage_2

        # Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        # Read new image
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger()
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or \
                object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image

        # Detect tag corners in acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        # Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        # Remove ids and corresponding corners not in range (parasitic
        # detections in random locations in the image)
        false_ids_ind = np.where(ids > 150)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        # Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        # Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)
        rvec_all_markers_ground, tvec_all_markers_ground, _ = \
            aruco.estimatePoseSingleMarkers(corners[0:20], 0.04,
                                            CamParam.camMatrix,
                                            CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = \
            aruco.estimatePoseSingleMarkers(corners[20:45], 0.025,
                                            CamParam.camMatrix,
                                            CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            UV[i_ids, :] = i_corners.reshape([1, 8])
            P[i_ids, :] = i_tvec

        # Used to find the height of the tags and the delta change of height;
        # z height at desired position
        Z = 1 * P[32:48, 2]

        # Check that both boards were detected
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            # Pixel estimates of the ideal ground tag location
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix, CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = \
                reprojected_imagePoints_ground_2.reshape(
                    [reprojected_imagePoints_ground_2.shape[0], 2])

            # Plot image points for the ground tag from corner detection and
            # from re-projection
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)
            cv2.imshow('Image', frame_with_markers_and_axis)
            cv2.waitKey(1)

            # Save the annotated frame
            filename_image = ("/home/rpi-cats/Desktop/DJ/Code/Images/"
                              "Panel1_Acquisition_" + str(t1) + "_" +
                              str(iteration) + ".jpg")
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)

            # Go through a particular corner of all tags to build the complete
            # Jacobian. Corner estimates are shifted to the image center
            # (principal point); necessary for the image Jacobian to work.
            for ic in range(4):
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (16, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] - 1192.255485086403,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] - 1021.399092147012
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]
                UV_current = np.vstack([
                    UV[32:48, 2 * ic] - 1192.255485086403,
                    UV[32:48, 2 * ic + 1] - 1021.399092147012
                ]).T

                # Difference between the desired and current tag corners
                delta_UV = UV_target - UV_current
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))

                # Stack two interaction-matrix rows per tag corner
                for tag_i in range(16):
                    J_cam_row = 1.0 * np.array(
                        [[-f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                          uc[tag_i] * vc[tag_i] / f_hat_u,
                          -1.0 * (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                          vc[tag_i]],
                         [0.0, -f_hat_v / Z[tag_i], vc[tag_i] / Z[tag_i],
                          vc[tag_i] * vc[tag_i] / f_hat_v + f_hat_v,
                          -1.0 * uc[tag_i] * vc[tag_i] / f_hat_v,
                          -uc[tag_i]]])
                    if tag_i == 0:
                        J_cam_tmp = J_cam_row
                    else:
                        J_cam_tmp = np.concatenate((J_cam_tmp, J_cam_row),
                                                   axis=0)

                # Camera Jacobian
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(32, 1)
                    UV_target_all = UV_target.reshape(32, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(32, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all, UV_target.reshape(32, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)

            # Solve for the camera twist with a QP rather than the plain
            # pseudo-inverse np.matmul(np.linalg.pinv(J_cam), delta_UV_all)
            dx = QP_Cam(J_cam, delta_UV_all)
            dx = dx.reshape([6, 1])
            dx_array.append(dx)

            # Reorder twist components from the camera to the robot convention
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0], -dx[2, 0]
            ])

            if stage == 2:
                # Gravity-compensated force/torque reading for this pose
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread
                print 'Z', FTread[-1]

                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200

                # Add the force-control z velocity to the visual-servo twist
                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]
                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            current_joint_angles = controller_commander.get_current_joint_values()

            # Limit the step size to the smallest norm seen so far
            step_size_tmp = np.linalg.norm(dx)
            if step_size_tmp <= step_size_min:
                step_size_min = step_size_tmp
            else:
                dx = dx / step_size_tmp * step_size_min

            joints_vel = QP_abbirb6640(
                np.array(current_joint_angles).reshape(6, 1), np.array(dx))
            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), 0.25, np.array(dx))
            step_size.append(np.linalg.norm(dx))

            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()
            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if res.error_code != 0:
                raise Exception("Trajectory execution returned error")
            print res
            print 'Current Iteration Finished.'

    # Saving iteration data to file
    filename_data = ("/home/rpi-cats/Desktop/YC/Data/Panel1_Data_" +
                     str(t1) + ".mat")
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array,
                         'step_size': step_size,
                         'iteration': iteration
                     })

    # Saving force data to file
    filename_force_data = ("/home/rpi-cats/Desktop/YC/Data/Panel1_Data_force_" +
                           str(t1) + ".mat")
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    print '###############################'
    print 'step_size', step_size
    print 'iteration', iteration
    print '###############################'

    '''
    print "============ Final Push Down to Nest"
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], [])
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
                   Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z]
    trans_current = [Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
                     Cur_Pose.pose.position.z]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1],
                              rot_current[2], rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] -= 0.11
    '''

    #### Final Nest Placement Error Calculation ===============================
    # Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or \
            object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    # Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    # Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    # Remove ids and corresponding corners not in range (parasitic detections
    # in random locations in the image); the filter is disabled here
    # false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    # mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    # Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    # Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    # Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']
    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec difference: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    print "tvec difference: ", tvec_err
    print "rvec difference: ", rvec_err

    # Adjustment
    print "Adjustment ===================="
    current_joint_angles = controller_commander.get_current_joint_values()
    dx = np.array([0, 0, 0, -tvec_err[0], tvec_err[1], 0])
    joints_vel = QP_abbirb6640(
        np.array(current_joint_angles).reshape(6, 1), np.array(dx))
    goal = trapezoid_gen(
        np.array(current_joint_angles) + joints_vel.dot(1),
        np.array(current_joint_angles), 0.25, np.array(dx))
    client = actionlib.SimpleActionClient("joint_trajectory_action",
                                          FollowJointTrajectoryAction)
    client.wait_for_server()
    client.send_goal(goal)
    client.wait_for_result()
    res = client.get_result()
    if res.error_code != 0:
        raise Exception("Trajectory execution returned error")
    print res
    print "End of Adjustment ===================="

    #### Final Nest Placement Error Calculation ===============================
    # Read new image
    last_ros_image_stamp = object_commander.ros_image_stamp
    try:
        ros_gripper_2_trigger.wait_for_service(timeout=0.1)
        ros_gripper_2_trigger()
    except:
        pass
    wait_count = 0
    while object_commander.ros_image is None or \
            object_commander.ros_image_stamp == last_ros_image_stamp:
        if wait_count > 50:
            raise Exception("Image receive timeout")
        time.sleep(0.25)
        wait_count += 1
    result = object_commander.ros_image

    # Detect tag corners in acquired image using aruco
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
        result, aruco_dict, parameters=parameters)

    # Sort corners and ids according to ascending order of ids
    corners_original = copy.deepcopy(corners)
    ids_original = np.copy(ids)
    sorting_indices = np.argsort(ids_original, 0)
    ids_sorted = ids_original[sorting_indices]
    ids_sorted = ids_sorted.reshape([len(ids_original), 1])
    combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
    combined_list.sort()
    corners_sorted = [x for y, x in combined_list]
    ids = np.copy(ids_sorted)
    corners = copy.deepcopy(corners_sorted)

    # Remove ids and corresponding corners not in range (parasitic detections
    # in random locations in the image); the filter is disabled here
    # false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    # mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    # Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    # Save pose of marker boards after the iterations end (while in the final
    # position in the nest)
    # Get pose of both ground and panel markers from detected corners
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    # Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Panel1_Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']
    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec difference: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    tvec_err = loaded_tvec_difference - observed_tvec_difference
    rvec_err = loaded_rvec_difference - observed_rvec_difference
    rospy.loginfo("tvec difference: %f, %f, %f", tvec_err[0], tvec_err[1],
                  tvec_err[2])
    rospy.loginfo("rvec difference: %f, %f, %f", rvec_err[0], rvec_err[1],
                  rvec_err[2])
    print "tvec difference: ", tvec_err
    print "rvec difference: ", rvec_err

    # Saving pose information to file
    filename_pose2 = ("/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" +
                      str(t1) + ".mat")
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [], [])

    # Release the vacuum gripper and run the second placement process step
    rapid_node.set_digital_io("Vacuum_enable", 0)
    g = ProcessStepGoal('plan_place_set_second_step', "")
    process_client.send_goal(g)
    print process_client.get_result()
    print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    time.sleep(0.5)

    tran = np.array([2.17209718963, -1.13702651252, 0.224273328841])
    rot = rox.q2R([0.0145063655084, -0.0282932350764, 0.999322921073,
                   -0.0185137145776])
    pose_target2 = rox.Transform(rot, tran)

    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_current = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_current = [
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ]
    pose_target2.R = rox.q2R([rot_current[0], rot_current[1], rot_current[2],
                              rot_current[3]])
    pose_target2.p = trans_current
    pose_target2.p[2] += 0.5

    # Raise the gripper away from the placed panel
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"

    # Publish process state to notify the GUI that the placement is finished
    s = ProcessState()
    s.state = "place_set"
    s.payload = "leeward_mid_panel"
    s.target = ""
    process_state_pub.publish(s)
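# A minimal sketch of the per-point interaction matrix assembled inline in the
# visual-servoing loop above: for an image point (u, v) measured from the
# principal point, depth Z, and focal lengths f_u, f_v in pixels, the 2x6
# matrix maps a camera twist to the pixel velocity. This is the standard
# point-feature image Jacobian; the loop stacks two such rows per tag corner
# to form J_cam before solving for dx with QP_Cam. The helper name is an
# assumption for illustration, not part of the original file.
import numpy as np

def image_jacobian_point(u, v, Z, f_u, f_v):
    """Return the 2x6 point-feature interaction matrix (same rows as the
    J_cam_tmp blocks built in the loop above)."""
    return np.array([
        [-f_u / Z, 0.0, u / Z, u * v / f_u, -(u * u / f_u + f_u), v],
        [0.0, -f_v / Z, v / Z, v * v / f_v + f_v, -u * v / f_v, -u]
    ])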
def main():
    # Start timer to measure execution time
    t1 = time.time()

    # Initialize ros node of this process
    rospy.init_node('Placement_DJ_1', anonymous=True)

    # Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    # Force-torque
    if not "disable-ft" in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    # print "============ Starting setup"
    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()
    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    # Subscribe to gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    # Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    # Set the location above the panel where the end effector goes first
    # (in the world/base frame): the ideal location of the panel
    tran = np.array([2.20120663258, 1.2145882986, 0.0798989466944])
    rot = np.array([[-0.9971185, 0.0071821, 0.0755188],
                    [0.0056502, 0.9997743, -0.0204797],
                    [-0.0756488, -0.0199939, -0.9969341]])
    pose_target2 = rox.Transform(rot, tran)
    pose_target3 = copy.deepcopy(pose_target2)
    pose_target2.p[2] += 0.35  # 35 cm above the ideal location of the panel

    # Execute movement to set location
    print "Executing initial path"
    controller_commander.compute_cartesian_path_and_move(
        pose_target2, avoid_collisions=False)

    # Initialize aruco boards and parameters
    aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    parameters = cv2.aruco.DetectorParameters_create()
    parameters.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX
    board_ground = cv2.aruco.GridBoard_create(5, 4, 0.04, 0.01, aruco_dict, 20)
    board_panel = cv2.aruco.GridBoard_create(8, 3, 0.025, 0.0075, aruco_dict, 50)

    # Initialize camera intrinsic parameters (18285636_10_05_2018_5_params)
    CamParam = CameraParams(2342.561249990927, 1209.151959040735,
                            2338.448312671424, 1055.254852652610, 1.0,
                            -0.014840837133389, -0.021008029929566,
                            3.320024219653553e-04, -0.002187550225028,
                            -0.025059986937316)

    # Subscribe to controller state. Again?
rospy.Subscriber("controller_state", controllerstate, callback) UV = np.zeros([74, 8]) P = np.zeros([74, 3]) #Load object points ground tag in panel tag coordinate system from mat file loaded_object_points_ground_in_panel_system_stage_1 = loadmat( '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat' )['object_points_ground_in_panel_tag_system'] loaded_object_points_ground_in_panel_system_stage_2 = loadmat( '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat' )['object_points_ground_in_panel_tag_system'] loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_1 #focal length in pixel units, this number is averaged values from f_hat for x and y f_hat_u = 2342.561249990927 #2282.523358266698#2281.339593273153 #2446.88 f_hat_v = 2338.448312671424 #2280.155828279608 #functions like a gain, used with velocity to track position dt = 0.1 du = 100.0 dv = 100.0 dutmp = 100.0 dvtmp = 100.0 TimeGain = [0.1, 0.1, 0.1] du_array = [] dv_array = [] dx_array = [] iteration = 0 stage = 1 step_ts = 0.004 Kc = 0.0002 time_save = [] FTdata_save = [] Tran_z = np.array([[0, 0, -1], [0, -1, 0], [1, 0, 0]]) Vec_wrench = 100 * np.array([ 0.019296738361905, 0.056232033265447, 0.088644197659430, 0.620524934626544, -0.517896661195076, 0.279323567303444, -0.059640563813256, 0.631460085138371, -0.151143175570223, -6.018321330845553 ]).transpose() FTdata_0 = FTdata T = listener.lookupTransform("base", "link_6", rospy.Time(0)) rg = 9.8 * np.matmul( np.matmul(T.R, Tran_z).transpose(), np.array([0, 0, 1]).transpose()) A1 = np.hstack([ rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3), np.zeros([3, 3]) ]) A2 = np.hstack( [np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]), np.eye(3)]) A = np.vstack([A1, A2]) FTdata_0est = np.matmul(A, Vec_wrench) FTread_array = [] FTdata_array = [] t_now_array = [] controller_commander.set_controller_mode( controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], []) #while (pose_target2.p[2] > 0.185): # while ((du>10) | (dv>10) | (pose_target2.p[2] > 0.172)): #try changing du and dv to lower values(number of iterations increases) while ( (du > 1) | (dv > 1) and (iteration < 125) ): #try changing du and dv to lower values(number of iterations increases) # while ((np.max(np.abs(dutmp))>0.5) | (np.max(np.abs(dvtmp))>0.5)): #try changing du and dv to lower values(number of iterations increases) iteration += 1 t_now_array.append(time.time()) #Go to stage2 of movement(mostly downward z motion) if ((du < 2) and (dv < 2) and (stage == 1)): #Save pose of marker boards after the iterations end(while in the final hovering position above nest) #Get pose of both ground and panel markers from detected corners retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP( objPoints_ground, imgPoints_ground, CamParam.camMatrix, CamParam.distCoeff) Rca_ground, b_ground = cv2.Rodrigues(rvec_ground) retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP( objPoints_panel, imgPoints_panel, CamParam.camMatrix, CamParam.distCoeff) Rca_panel, b_panel = cv2.Rodrigues(rvec_panel) print "============== Observed Pose difference in hovering position" observed_tvec_difference = tvec_ground - tvec_panel observed_rvec_difference = rvec_ground - rvec_panel print "tvec difference: ", observed_tvec_difference print "rvec differnece: ", observed_rvec_difference #Load ideal pose differnece information from file loaded_rvec_difference = loadmat( '/home/rpi-cats/Desktop/DJ/Ideal 
            #Load ideal pose difference information from file
            loaded_rvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['rvec_difference']
            loaded_tvec_difference = loadmat(
                '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat'
            )['tvec_difference']

            print "============== Ideal Pose difference in hovering position"
            print "tvec difference: ", loaded_tvec_difference
            print "rvec difference: ", loaded_rvec_difference

            tvec_difference_Above_Nest = loaded_tvec_difference - observed_tvec_difference
            rvec_difference_Above_Nest = loaded_rvec_difference - observed_rvec_difference
            print "============== Difference in pose difference in hovering position"
            print "tvec difference: ", tvec_difference_Above_Nest
            print "rvec difference: ", rvec_difference_Above_Nest

            #Save pose information to file
            filename_pose1 = "/home/rpi-cats/Desktop/DJ/Code/Data/Above_Nest_Pose_" + str(
                t1) + ".mat"
            scipy.io.savemat(filename_pose1,
                             mdict={
                                 'tvec_ground_Above_Nest': tvec_ground,
                                 'tvec_panel_Above_Nest': tvec_panel,
                                 'Rca_ground_Above_Nest': Rca_ground,
                                 'Rca_panel_Above_Nest': Rca_panel,
                                 'tvec_difference_Above_Nest': tvec_ground - tvec_panel,
                                 'rvec_difference_Above_Nest': rvec_ground - rvec_panel,
                                 'loaded_tvec_difference': loaded_tvec_difference,
                                 'loaded_rvec_difference': loaded_rvec_difference,
                                 'observed_tvec_difference': observed_tvec_difference,
                                 'observed_rvec_difference': observed_rvec_difference
                             })

            raw_input("Confirm Stage 2")
            stage = 2
            # dt=0.02
            loaded_object_points_ground_in_panel_system = loaded_object_points_ground_in_panel_system_stage_2

        #print pose_target2.p[2]

        #Print current robot pose at the beginning of this iteration
        Cur_Pose = controller_commander.get_current_pose_msg()
        print "============ Current Robot Pose"
        print Cur_Pose

        #Read a new image: trigger the camera and wait for a frame with a
        #newer timestamp than the last one received
        last_ros_image_stamp = object_commander.ros_image_stamp
        try:
            ros_gripper_2_trigger.wait_for_service(timeout=0.1)
            ros_gripper_2_trigger(False)
        except:
            pass
        wait_count = 0
        while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
            if wait_count > 50:
                raise Exception("Image receive timeout")
            time.sleep(0.25)
            wait_count += 1
        result = object_commander.ros_image

        #Save
        # filename = "Acquisition3_%d.jpg" % (time.time())
        # scipy.misc.imsave(filename, result)
        #Display
        # cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
        # cv2.imshow('Acquired Image',result)
        # cv2.waitKey(1)

        #Detect tag corners in the acquired image using aruco
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            result, aruco_dict, parameters=parameters)
        frame_with_markers_and_axis = result

        #Sort corners and ids according to ascending order of ids
        corners_original = copy.deepcopy(corners)
        ids_original = np.copy(ids)
        sorting_indices = np.argsort(ids_original, 0)
        ids_sorted = ids_original[sorting_indices]
        ids_sorted = ids_sorted.reshape([len(ids_original), 1])
        combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original)
        # print "combined_list:",combined_list
        combined_list.sort()
        corners_sorted = [x for y, x in combined_list]
        ids = np.copy(ids_sorted)
        corners = copy.deepcopy(corners_sorted)

        #Remove ids and corresponding corners not in range
        #(parasitic detections at random locations in the image)
        false_ids_ind = np.where(ids > 73)
        mask = np.ones(ids.shape, dtype=bool)
        mask[false_ids_ind] = False
        ids = ids[mask]
        corners = np.array(corners)
        corners = corners[mask.flatten(), :]
        corners = list(corners)

        #Define object and image points of both tags
        objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
            board_ground, corners, ids)
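        #getBoardObjectAndImagePoints keeps only the detected corners whose
        #ids belong to the given board (ground board: ids 20-39, panel
        #board: ids 50-73, per the GridBoard_create calls above) and returns
        #the matching 3D board points and 2D image points for solvePnP.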
        objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
            board_panel, corners, ids)
        objPoints_ground = objPoints_ground.reshape(
            [objPoints_ground.shape[0], 3])
        imgPoints_ground = imgPoints_ground.reshape(
            [imgPoints_ground.shape[0], 2])
        objPoints_panel = objPoints_panel.reshape(
            [objPoints_panel.shape[0], 3])
        imgPoints_panel = imgPoints_panel.reshape(
            [imgPoints_panel.shape[0], 2])

        #Get pose of both ground and panel markers from detected corners
        retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
            objPoints_ground, imgPoints_ground, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
        retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
            objPoints_panel, imgPoints_panel, CamParam.camMatrix,
            CamParam.distCoeff)
        Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

        #Draw board axes and estimate per-marker poses
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_ground, tvec_ground, 0.2)
        frame_with_markers_and_axis = cv2.aruco.drawAxis(
            frame_with_markers_and_axis, CamParam.camMatrix,
            CamParam.distCoeff, Rca_panel, tvec_panel, 0.2)
        rvec_all_markers_ground, tvec_all_markers_ground, _ = aruco.estimatePoseSingleMarkers(
            corners[0:20], 0.04, CamParam.camMatrix, CamParam.distCoeff)
        rvec_all_markers_panel, tvec_all_markers_panel, _ = aruco.estimatePoseSingleMarkers(
            corners[20:45], 0.025, CamParam.camMatrix, CamParam.distCoeff)
        tvec_all = np.concatenate(
            (tvec_all_markers_ground, tvec_all_markers_panel), axis=0)
        for i_ids, i_corners, i_tvec in zip(ids, corners, tvec_all):
            # print 'i_corners',i_corners,i_corners.reshape([1,8])
            UV[i_ids, :] = i_corners.reshape([1, 8])
            P[i_ids, :] = i_tvec

        #Heights of the ground tags in the camera frame, used as the depth
        #estimates Z in the image Jacobian below
        Z = 1 * P[20:40, 2]  #- [0.68184539, 0.68560932, 0.68966803, 0.69619578])

        #Check that both boards were detected
        if retVal_ground != 0 and retVal_panel != 0:
            dutmp = []
            dvtmp = []

            #Pixel estimates of the ideal ground tag location, reprojected
            #through the current panel pose
            reprojected_imagePoints_ground_2, jacobian2 = cv2.projectPoints(
                loaded_object_points_ground_in_panel_system.transpose(),
                rvec_panel, tvec_panel, CamParam.camMatrix,
                CamParam.distCoeff)
            reprojected_imagePoints_ground_2 = reprojected_imagePoints_ground_2.reshape(
                [reprojected_imagePoints_ground_2.shape[0], 2])
            # print "Image Points Ground:", imgPoints_ground
            # print "Reprojected Image Points Ground2:", reprojected_imagePoints_ground_2
            # print "Reprojection error:", imgPoints_ground - reprojected_imagePoints_ground_2
            # print "Average Reprojection error: ", np.mean(imgPoints_ground - reprojected_imagePoints_ground_2, axis=0)

            #Plot image points for the ground tag from corner detection and
            #from re-projection
            for point1, point2 in zip(
                    imgPoints_ground,
                    np.float32(reprojected_imagePoints_ground_2)):
                cv2.circle(frame_with_markers_and_axis, tuple(point1), 5,
                           (0, 0, 255), 3)
                cv2.circle(frame_with_markers_and_axis, tuple(point2), 5,
                           (255, 0, 0), 3)
            # cv2.namedWindow('Acquired Image',cv2.WINDOW_NORMAL)
            # cv2.imshow('Acquired Image',frame_with_markers_and_axis)
            # cv2.waitKey(1)

            #Save the annotated image
            filename_image = "/home/rpi-cats/Desktop/DJ/Code/Images/Acquisition_" + str(
                t1) + "_" + str(iteration) + ".jpg"
            scipy.misc.imsave(filename_image, frame_with_markers_and_axis)
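            #Each image point (u, v) with depth Z contributes two rows of
            #the standard IBVS interaction (image Jacobian) matrix relating
            #camera twist to pixel velocity:
            #  [ -f/Z    0    u/Z   u*v/f       -(u^2/f + f)   v ]
            #  [   0   -f/Z   v/Z   v^2/f + f   -u*v/f        -u ]
            #The loop below stacks these rows for one corner of each of the
            #20 ground tags, for each of the 4 corners (see also the
            #standalone sketch at the end of this file).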
            #Go through a particular corner point in all tags to build the
            #complete Jacobian; the offsets (1209.15..., 1055.25...) shift
            #corner estimates to the image center, which is necessary for
            #the image Jacobian to work
            for ic in range(4):
                #UV_target = np.vstack([UV[9:13,2*ic]-1209.151959040735,UV[9:13,2*ic+1]-1055.254852652610]).T - UV_shift[:,(2*ic):(2*ic+2)]
                reprojected_imagePoints_ground_2 = np.reshape(
                    reprojected_imagePoints_ground_2, (20, 8))
                UV_target = np.vstack([
                    reprojected_imagePoints_ground_2[:, 2 * ic] -
                    1209.151959040735,
                    reprojected_imagePoints_ground_2[:, 2 * ic + 1] -
                    1055.254852652610
                ]).T
                uc = UV_target[:, 0]
                vc = UV_target[:, 1]
                # print 'UV_target', UV_target
                UV_current = np.vstack([
                    UV[20:40, 2 * ic] - 1209.151959040735,
                    UV[20:40, 2 * ic + 1] - 1055.254852652610
                ]).T

                #Difference between current and desired corner locations
                delta_UV = UV_target - UV_current
                # print 'delta_UV: ', ic, delta_UV
                dutmp.append(np.mean(delta_UV[:, 0]))
                dvtmp.append(np.mean(delta_UV[:, 1]))

                for tag_i in range(20):
                    J_cam_row = 1.0 * np.array(
                        [[
                            -f_hat_u / Z[tag_i], 0.0, uc[tag_i] / Z[tag_i],
                            uc[tag_i] * vc[tag_i] / f_hat_u,
                            -1.0 * (uc[tag_i] * uc[tag_i] / f_hat_u + f_hat_u),
                            vc[tag_i]
                        ],
                         [
                             0.0, -f_hat_v / Z[tag_i], vc[tag_i] / Z[tag_i],
                             vc[tag_i] * vc[tag_i] / f_hat_v + f_hat_v,
                             -1.0 * uc[tag_i] * vc[tag_i] / f_hat_v,
                             -uc[tag_i]
                         ]])
                    if tag_i == 0:
                        J_cam_tmp = J_cam_row
                    else:
                        J_cam_tmp = np.concatenate((J_cam_tmp, J_cam_row),
                                                   axis=0)

                #Stack the camera Jacobian and pixel errors over all corners
                if ic == 0:
                    J_cam = J_cam_tmp
                    delta_UV_all = delta_UV.reshape(40, 1)
                    UV_target_all = UV_target.reshape(40, 1)
                else:
                    J_cam = np.vstack([J_cam, J_cam_tmp])
                    delta_UV_all = np.vstack(
                        [delta_UV_all, delta_UV.reshape(40, 1)])
                    UV_target_all = np.vstack(
                        [UV_target_all, UV_target.reshape(40, 1)])

            print 'dutmp: ', dutmp
            print 'dvtmp: ', dvtmp
            du = np.mean(np.absolute(dutmp))
            dv = np.mean(np.absolute(dvtmp))
            print 'Average du of all points:', du, 'Average dv of all points:', dv
            du_array.append(du)
            dv_array.append(dv)
            # print 'delta_UV_all', delta_UV_all

            #Least-squares camera twist from the stacked image Jacobian
            dx = np.matmul(np.linalg.pinv(J_cam), np.array(delta_UV_all))
            dx_array.append(dx)
            # print 'dx:', dx

            #Coordinate system change for the translational and angular
            #velocity; TODO: replace with a tf transform
            #dx_w = np.array([dx[3,0],-dx[4,0],-dx[5,0]])
            #Map omega to current quaternion:
            #Omega = 0.5*np.vstack([np.hstack([0,-dx_w]),np.hstack([dx_w.reshape(3,1),rox.hat(np.array(dx_w))])])
            #rot_current = [Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,Cur_Pose.pose.orientation.y,Cur_Pose.pose.orientation.z]
            #rot = np.matmul(expm(Omega*dt),np.array(rot_current))
            #trans_current = [Cur_Pose.pose.position.x,Cur_Pose.pose.position.y,Cur_Pose.pose.position.z]
            #trans = trans_current + np.array([dx[0,0],-dx[1,0],-dx[2,0]])*TimeGain
            #pose_target2 = rox.Transform(rox.q2R([rot[0], rot[1], rot[2], rot[3]]), trans)

            #Reorder to [angular; translational] with sign flips on the
            #y and z components for the camera-to-base convention
            dx = np.array([
                dx[3, 0], -dx[4, 0], -dx[5, 0], dx[0, 0], -dx[1, 0],
                -dx[2, 0]
            ])
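            #In stage 2 the z (last) component of dx is augmented with a
            #force-feedback term, so the panel is pressed into the nest with
            #a regulated contact force while the image error continues to
            #correct the in-plane alignment.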
            if stage == 2:
                #Gravity-compensated force-torque reading: subtract the
                #baseline and the change predicted by the wrench model
                T = listener.lookupTransform("base", "link_6", rospy.Time(0))
                rg = 9.8 * np.matmul(
                    np.matmul(T.R, Tran_z).transpose(),
                    np.array([0, 0, 1]).transpose())
                A1 = np.hstack([
                    rox.hat(rg).transpose(), np.zeros([3, 1]), np.eye(3),
                    np.zeros([3, 3])
                ])
                A2 = np.hstack([
                    np.zeros([3, 3]), rg.reshape([3, 1]), np.zeros([3, 3]),
                    np.eye(3)
                ])
                A = np.vstack([A1, A2])
                FTdata_est = np.matmul(A, Vec_wrench)
                FTread = FTdata - FTdata_0 - FTdata_est + FTdata_0est
                print 'FTread:', FTread
                print 'Z', FTread[-1]

                #Proportional force control on the z axis: command a
                #velocity toward the desired contact force F_d
                if FTread[-1] > -100:
                    F_d = -150
                    Kc = 0.0001
                else:
                    Kc = 0.0001
                    F_d = -200
                Vz = Kc * (F_d - FTread[-1])
                print 'Vz', Vz
                dx[-1] = Vz + dx[-1]

                FTread_array.append(FTread)
                FTdata_array.append(FTdata)

            #Map the Cartesian twist to joint velocities with the robot
            #Jacobian and execute a short move toward the corrected pose
            current_joint_angles = controller_commander.get_current_joint_values()
            J = rox.robotjacobian(robot, current_joint_angles)
            joints_vel = np.linalg.pinv(J).dot(np.array(dx))
            print 'vel_norm:', np.linalg.norm(joints_vel)

            #Scale the move duration with the commanded speed, with a
            #0.1 s minimum
            t_duration = np.log(np.linalg.norm(joints_vel) * 10 + 1) + 0.1
            if t_duration < 0.1:
                t_duration = 0.1
            goal = trapezoid_gen(
                np.array(current_joint_angles) + joints_vel.dot(dt),
                np.array(current_joint_angles), t_duration)
            '''
            plan=RobotTrajectory()
            plan.joint_trajectory.joint_names=['joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6']
            plan.joint_trajectory.header.frame_id='/world'
            p1=JointTrajectoryPoint()
            p1.positions = current_joint_angles
            p1.velocities = np.zeros((6,))
            p1.accelerations = np.zeros((6,))
            p1.time_from_start = rospy.Duration(0)
            p2=JointTrajectoryPoint()
            p2.positions = np.array(p1.positions) + joints_vel.dot(dt)
            p2.velocities = np.zeros((6,))
            p2.accelerations = np.zeros((6,))
            p2.time_from_start = rospy.Duration(dt)
            controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 1.0, [], [])
            plan.joint_trajectory.points.append(p1)
            plan.joint_trajectory.points.append(p2)
            '''
            client = actionlib.SimpleActionClient("joint_trajectory_action",
                                                  FollowJointTrajectoryAction)
            client.wait_for_server()
            client.send_goal(goal)
            client.wait_for_result()
            res = client.get_result()
            if (res.error_code != 0):
                raise Exception("Trajectory execution returned error")
            print res
            #break
            #raw_input("confirm move...")
            '''
            print "============ Executing Current Iteration movement"
            try:
                controller_commander.execute(plan)
                #controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False)
            except:
                pass
            '''

        print 'Current Iteration Finished.'
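    #The loop above exits on pixel-error convergence (average du and dv at
    #or below 1 pixel) or after 125 iterations; the per-iteration
    #diagnostics collected along the way are written out below.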
    # filename = "delta_UV_all.txt"
    # f_handle = file(filename, 'a')
    # np.savetxt(f_handle, delta_UV_all)
    # f_handle.close()

    # filename = "UV_target_all.txt"
    # f_handle = file(filename, 'a')
    # np.savetxt(f_handle, UV_target_all)
    # f_handle.close()

    # filename = "P.txt"
    # f_handle = file(filename, 'a')
    # np.savetxt(f_handle, P)
    # f_handle.close()

    # filename = "Robot.txt"
    # f_handle = file(filename, 'a')
    # np.savetxt(f_handle, np.hstack([np.array(trans_current),np.array(rot_current)]))
    # f_handle.close()

    #Save iteration data to file
    filename_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'du_array': du_array,
                         'dv_array': dv_array,
                         'dx_array': dx_array
                     })

    #Save force data to file
    filename_force_data = "/home/rpi-cats/Desktop/DJ/Code/Data/Data_force_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_force_data,
                     mdict={
                         'FTread': FTread_array,
                         'FTdata': FTdata_array,
                         't_now_array': t_now_array
                     })

    # ####Hovering error calculation
    # #Read new image
    # last_ros_image_stamp = object_commander.ros_image_stamp
    # try:
    #     ros_gripper_2_trigger.wait_for_service(timeout=0.1)
    #     ros_gripper_2_trigger(False)
    # except:
    #     pass
    # wait_count=0
    # while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp:
    #     if wait_count > 50:
    #         raise Exception("Image receive timeout")
    #     time.sleep(0.25)
    #     wait_count += 1
    # result = object_commander.ros_image
    #
    # #Detect tag corners in acquired image using aruco
    # corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(result, aruco_dict, parameters=parameters)
    #
    # #Sort corners and ids according to ascending order of ids
    # corners_original=copy.deepcopy(corners)
    # ids_original=np.copy(ids)
    # sorting_indices=np.argsort(ids_original,0)
    # ids_sorted=ids_original[sorting_indices]
    # ids_sorted=ids_sorted.reshape([len(ids_original),1])
    # combined_list=zip(np.ndarray.tolist(ids.flatten()),corners_original)
    # combined_list.sort()
    # corners_sorted=[x for y,x in combined_list]
    # ids=np.copy(ids_sorted)
    # corners=copy.deepcopy(corners_sorted)
    #
    # #Remove ids and corresponding corners not in range (parasitic detections in random locations in the image)
    # false_ids_ind = np.where(ids>73)
    # mask = np.ones(ids.shape, dtype=bool)
    # mask[false_ids_ind] = False
    # ids = ids[mask]
    # corners = np.array(corners)
    # corners = corners[mask.flatten(),:]
    # corners = list(corners)
    #
    # #Define object and image points of both tags
    # objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(board_ground, corners, ids)
    # objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(board_panel, corners, ids)
    # objPoints_ground=objPoints_ground.reshape([objPoints_ground.shape[0],3])
    # imgPoints_ground=imgPoints_ground.reshape([imgPoints_ground.shape[0],2])
    # objPoints_panel=objPoints_panel.reshape([objPoints_panel.shape[0],3])
    # imgPoints_panel=imgPoints_panel.reshape([imgPoints_panel.shape[0],2])
    #
    # #Get pose of both ground and panel markers from detected corners
    # retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(objPoints_ground, imgPoints_ground, CamParam.camMatrix, CamParam.distCoeff)
    # Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    # retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(objPoints_panel, imgPoints_panel, CamParam.camMatrix, CamParam.distCoeff)
    # Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)
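    #For offline analysis the saved arrays can be read back with
    #scipy.io.loadmat, e.g. loadmat(filename_data)['du_array'] gives the
    #per-iteration average pixel error in u.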
hovering position" # observed_tvec_difference=tvec_ground-tvec_panel # observed_rvec_difference=rvec_ground-rvec_panel # print "tvec difference: ",observed_tvec_difference # print "rvec differnece: ",observed_rvec_difference # # #Load ideal pose differnece information from file # loaded_rvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['rvec_difference'] # loaded_tvec_difference = loadmat('/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_Above_Nest.mat')['tvec_difference'] # # print"============== Ideal Pose difference in hovering position" # print "tvec difference: ",loaded_tvec_difference # print "rvec differnece: ",loaded_rvec_difference # # print"============== Difference in pose difference in hovering position" # print "tvec difference: ",loaded_tvec_difference-observed_tvec_difference # print "rvec differnece: ",loaded_rvec_difference-observed_rvec_difference # # #Saving pose information to file # filename_pose1 = "/home/armabb6640/Desktop/DJ/Code/Data/Above_Nest_Pose_"+str(t1)+".mat" # scipy.io.savemat(filename_pose1, mdict={'tvec_ground_Above_Nest':tvec_ground, 'tvec_panel_Above_Nest':tvec_panel, # 'Rca_ground_Above_Nest':Rca_ground, 'Rca_panel_Above_Nest': Rca_panel, 'tvec_difference_Above_Nest': tvec_ground-tvec_panel, 'rvec_difference_Above_Nest': rvec_ground-rvec_panel}) print "============ Final Push Down to Nest" # while (1): #DJ Final push from hovering above nest into resting in the nest controller_commander.set_controller_mode( controller_commander.MODE_AUTO_TRAJECTORY, 0.2, [], []) # pose_target2.p[2] = 0.115 # pose_target2.p[2] = 0.15 Cur_Pose = controller_commander.get_current_pose_msg() rot_current = [ Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x, Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z ] trans_current = [ Cur_Pose.pose.position.x, Cur_Pose.pose.position.y, Cur_Pose.pose.position.z ] pose_target2.R = rox.q2R( [rot_current[0], rot_current[1], rot_current[2], rot_current[3]]) pose_target2.p = trans_current pose_target2.p[2] -= 0.11 # pose_target2.p[0] += 0.005 # pose_target2.p[1] -= 0.002 # controller_commander.compute_cartesian_path_and_move(pose_target2, avoid_collisions=False) #### Final Nest Placement Error Calculation =============================== #Read new image last_ros_image_stamp = object_commander.ros_image_stamp try: ros_gripper_2_trigger.wait_for_service(timeout=0.1) ros_gripper_2_trigger(False) except: pass wait_count = 0 while object_commander.ros_image is None or object_commander.ros_image_stamp == last_ros_image_stamp: if wait_count > 50: raise Exception("Image receive timeout") time.sleep(0.25) wait_count += 1 result = object_commander.ros_image #Detect tag corners in aqcuired image using aruco corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers( result, aruco_dict, parameters=parameters) #Sort corners and ids according to ascending order of ids corners_original = copy.deepcopy(corners) ids_original = np.copy(ids) sorting_indices = np.argsort(ids_original, 0) ids_sorted = ids_original[sorting_indices] ids_sorted = ids_sorted.reshape([len(ids_original), 1]) combined_list = zip(np.ndarray.tolist(ids.flatten()), corners_original) combined_list.sort() corners_sorted = [x for y, x in combined_list] ids = np.copy(ids_sorted) corners = copy.deepcopy(corners_sorted) #Remove ids and corresponsing corners not in range (Parasitic detections in random locations in the image) false_ids_ind = np.where(ids > 73) 
    #Remove ids and corresponding corners not in range
    #(parasitic detections at random locations in the image)
    false_ids_ind = np.where(ids > 73)
    mask = np.ones(ids.shape, dtype=bool)
    mask[false_ids_ind] = False
    ids = ids[mask]
    corners = np.array(corners)
    corners = corners[mask.flatten(), :]
    corners = list(corners)

    #Define object and image points of both tags
    objPoints_ground, imgPoints_ground = aruco.getBoardObjectAndImagePoints(
        board_ground, corners, ids)
    objPoints_panel, imgPoints_panel = aruco.getBoardObjectAndImagePoints(
        board_panel, corners, ids)
    objPoints_ground = objPoints_ground.reshape([objPoints_ground.shape[0], 3])
    imgPoints_ground = imgPoints_ground.reshape([imgPoints_ground.shape[0], 2])
    objPoints_panel = objPoints_panel.reshape([objPoints_panel.shape[0], 3])
    imgPoints_panel = imgPoints_panel.reshape([imgPoints_panel.shape[0], 2])

    #Get pose of both ground and panel markers from detected corners
    #(final resting position in the nest)
    retVal_ground, rvec_ground, tvec_ground = cv2.solvePnP(
        objPoints_ground, imgPoints_ground, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_ground, b_ground = cv2.Rodrigues(rvec_ground)
    retVal_panel, rvec_panel, tvec_panel = cv2.solvePnP(
        objPoints_panel, imgPoints_panel, CamParam.camMatrix,
        CamParam.distCoeff)
    Rca_panel, b_panel = cv2.Rodrigues(rvec_panel)

    print "============== Observed Pose difference in nest position"
    observed_tvec_difference = tvec_ground - tvec_panel
    observed_rvec_difference = rvec_ground - rvec_panel
    print "tvec difference: ", observed_tvec_difference
    print "rvec difference: ", observed_rvec_difference

    #Load ideal pose difference information from file
    loaded_rvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['rvec_difference']
    loaded_tvec_difference = loadmat(
        '/home/rpi-cats/Desktop/DJ/Ideal Position/Cam_636_object_points_ground_tag_in_panel_frame_In_Nest.mat'
    )['tvec_difference']

    print "============== Ideal Pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference
    print "rvec difference: ", loaded_rvec_difference

    print "============== Difference in pose difference in nest position"
    print "tvec difference: ", loaded_tvec_difference - observed_tvec_difference
    print "rvec difference: ", loaded_rvec_difference - observed_rvec_difference

    #Save pose information to file
    filename_pose2 = "/home/rpi-cats/Desktop/DJ/Code/Data/In_Nest_Pose_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_pose2,
                     mdict={
                         'tvec_ground_In_Nest': tvec_ground,
                         'tvec_panel_In_Nest': tvec_panel,
                         'Rca_ground_In_Nest': Rca_ground,
                         'Rca_panel_In_Nest': Rca_panel,
                         'tvec_difference_In_Nest': tvec_ground - tvec_panel,
                         'rvec_difference_In_Nest': rvec_ground - rvec_panel,
                         'loaded_tvec_difference': loaded_tvec_difference,
                         'loaded_rvec_difference': loaded_rvec_difference,
                         'observed_tvec_difference': observed_tvec_difference,
                         'observed_rvec_difference': observed_rvec_difference
                     })

    # print "============ Lift gripper!"
    # controller_commander.set_controller_mode(controller_commander.MODE_AUTO_TRAJECTORY, 0.7, [])
    # raw_input("confirm release vacuum")
    # #rapid_node.set_digital_io("Vacuum_enable", 0)
    # print "VACUUM OFF!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    # time.sleep(0.5)
    # pose_target3.p[2] += 0.2
    #
    # #print 'Target:',pose_target3
    # #print "============ Executing plan4"
    # #controller_commander.compute_cartesian_path_and_move(pose_target3, avoid_collisions=False)

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
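#A minimal standalone sketch (not called by main above) of the two-row
#point-feature interaction matrix that the visual servoing loop stacks.
#f_u, f_v are focal lengths in pixels, (u, v) are image-centered pixel
#coordinates, and Z is the estimated feature depth; the function name is
#illustrative only.
def point_feature_interaction_matrix(u, v, Z, f_u, f_v):
    #Rows map camera twist [vx, vy, vz, wx, wy, wz] to pixel velocity [du, dv]
    return np.array(
        [[-f_u / Z, 0.0, u / Z, u * v / f_u, -(u * u / f_u + f_u), v],
         [0.0, -f_v / Z, v / Z, v * v / f_v + f_v, -u * v / f_v, -u]])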
def main():
    #Start timer to measure execution time
    t1 = time.time()

    #Subscribe to controller_state
    rospy.Subscriber("controller_state", controllerstate, callback)
    last_ros_image_stamp = rospy.Time(0)

    #Force-torque threshold (disabled when "disable-ft" is passed on the command line)
    if "disable-ft" not in sys.argv:
        ft_threshold1 = ft_threshold
    else:
        ft_threshold1 = []

    #Initialize ros node of this process
    rospy.init_node('user_defined_motion', anonymous=True)

    #print "============ Starting setup"

    listener = PayloadTransformListener()
    rapid_node = rapid_node_pkg.RAPIDCommander()
    controller_commander = controller_commander_pkg.ControllerCommander()
    object_commander = ObjectRecognitionCommander()
    robot = urdf.robot_from_parameter_server()

    #Subscribe to gripper camera node for image acquisition
    ros_gripper_2_img_sub = rospy.Subscriber(
        '/gripper_camera_2/image', Image,
        object_commander.ros_raw_gripper_2_image_cb)
    ros_gripper_2_trigger = rospy.ServiceProxy(
        '/gripper_camera_2/continuous_trigger', CameraTrigger)

    #Set controller command mode
    controller_commander.set_controller_mode(
        controller_commander.MODE_AUTO_TRAJECTORY, 0.4, ft_threshold_place, [])
    time.sleep(0.5)

    #Record the starting pose, then step the end effector upward in 0.25 m
    #increments, logging commanded vs. measured poses at each step
    current_joint_angles = controller_commander.get_current_joint_values()
    Cur_Pose = controller_commander.get_current_pose_msg()
    rot_o = [
        Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
        Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
    ]
    trans_o = np.array([
        Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
        Cur_Pose.pose.position.z
    ])

    pos_command = []
    quat_command = []
    pos_read = []
    quat_read = []
    time_all = []

    for i in range(4):
        #rot_set is a 3x3 rotation matrix (from rox.q2R); it is stored under
        #'quat_command' below to preserve the original data layout
        rot_set = rox.q2R(rot_o)
        tran_set = trans_o + np.array([0, 0, i * 0.25])
        pose_target2 = rox.Transform(rot_set, tran_set)

        #Execute movement to set location
        print "Executing initial path"
        controller_commander.compute_cartesian_path_and_move(
            pose_target2, avoid_collisions=False)
        time.sleep(1)

        Cur_Pose = controller_commander.get_current_pose_msg()
        rot_cur = [
            Cur_Pose.pose.orientation.w, Cur_Pose.pose.orientation.x,
            Cur_Pose.pose.orientation.y, Cur_Pose.pose.orientation.z
        ]
        trans_cur = [
            Cur_Pose.pose.position.x, Cur_Pose.pose.position.y,
            Cur_Pose.pose.position.z
        ]
        pos_command.append(tran_set)
        quat_command.append(rot_set)
        pos_read.append(trans_cur)
        quat_read.append(rot_cur)
        time_all.append(time.time())

    #Save iteration data to file
    filename_data = "/home/rpi-cats/Desktop/YC/Data/Motion Capture/Data_" + str(
        t1) + ".mat"
    scipy.io.savemat(filename_data,
                     mdict={
                         'pos_command': pos_command,
                         'quat_command': quat_command,
                         'pos_read': pos_read,
                         'quat_read': quat_read,
                         'time_all': time_all
                     })

    t2 = time.time()
    print 'Execution Finished.'
    print "Execution time: " + str(t2 - t1) + " seconds"
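#trapezoid_gen, used by the placement script above, is defined elsewhere in
#this package; the sketch below shows one plausible minimal form, assuming
#it returns a FollowJointTrajectoryGoal that moves between two joint
#configurations in t_duration seconds with zero boundary velocity. The
#velocity profile of the real implementation may differ, and the function
#name here is illustrative only.
def simple_two_point_goal(q_target, q_current, t_duration):
    from control_msgs.msg import FollowJointTrajectoryGoal
    from trajectory_msgs.msg import JointTrajectoryPoint
    goal = FollowJointTrajectoryGoal()
    goal.trajectory.joint_names = [
        'joint_1', 'joint_2', 'joint_3', 'joint_4', 'joint_5', 'joint_6'
    ]
    #Start point: current configuration at t = 0
    p1 = JointTrajectoryPoint()
    p1.positions = list(q_current)
    p1.velocities = [0.0] * 6
    p1.time_from_start = rospy.Duration(0.0)
    #End point: target configuration at t = t_duration
    p2 = JointTrajectoryPoint()
    p2.positions = list(q_target)
    p2.velocities = [0.0] * 6
    p2.time_from_start = rospy.Duration(t_duration)
    goal.trajectory.points = [p1, p2]
    return goal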