q_x=dQ[0], q_y=dQ[1], q_z=dQ[2], q_w=dQ[3])
Gp.move_move(limb, group, positions=moveJoint)

# Retake an image of the block for recognition
img = Gp.take_picture(0, 30)
square_list = iH.square_img_to_centers_list(img)

# Pick the square closest to the image center and predict a grasp on it
square_to_find = iH.find_square_closest_to_center(img, square_list)
H, W, Ang = gi.predictGraspOnImage(img, [square_to_find.getCenter().x,
                                         square_to_find.getCenter().y])

# Convert the square's pixel center to world coordinates and use the
# square's detected orientation as the grasp angle
worldVec, hom_Mtrx_c_b, rot = Gp.pixelToWorld(square_to_find.getCenterX(),
                                              square_to_find.getCenterY())
Ang = square_to_find.getAngle(square_list)
Gp.graspExecute(limb, gripper, W, H, Ang, worldVec[0], worldVec[1], 1, group)

# Approach the drop-off location from 0.3 m above it
movingLoc = drop_off_location
pre_moving_loc = copy.deepcopy(drop_off_location)
pre_moving_loc[2] += 0.3
# Convert the target yaw angle to a quaternion for the IK request
# (for a pure yaw rotation about z this is (0, 0, sin(z/2), cos(z/2)))
Qua = Gp.euler_to_quaternion(z=pre_moving_loc[3])

pre_drop_block_pos = Gp.ik_service_client(limb='right', use_advanced_options=True,
# cv2.imshow('image', frame)
# cv2.waitKey(0)

# Locate the requested fruit in the frame and predict a grasp on it
objLoc = Gp.detect_objects(type_of_fruit, frame)
# try:
#     H, W, Ang = gi.predictGraspOnImage(frame, objLoc)
# except ValueError:
#     print('failed 1st grasping attempt')
#     frame = Gp.take_picture(0, 30)
#     cv2.imshow('image', frame)
#     cv2.waitKey(0)
H, W, Ang = gi.predictGraspOnImage(frame, objLoc)

# H = (H + worldVec[0]) * 0.5
# W = (W + worldVec[1]) * 0.5
# print "W is: ", W
# print "H is: ", H
# print "worldVec[0] is: ", worldVec[0]
# print "worldVec[1] is: ", worldVec[1]

# Execute the grasp at the predicted image location and world position
Gp.graspExecute(limb, gripper, W, H, Ang, worldVec[0], worldVec[1], table)
rospy.sleep(1)
rospy.sleep(1)

# Arm joint angles for the fruit drop-off pose
drop_fruit_pos = [-0.402546875, -1.1622041015625, 0.3266787109375,
                  2.2412666015625, -0.301185546875, 0.469794921875,
                  -1.2894443359375]
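# For reference only, a minimal sketch (kept commented out, not part of the
# original pipeline) of how a seven-element joint list like drop_fruit_pos
# could be sent to the right arm through intera_interface; the names `arm`
# and `target` are illustrative assumptions, and the project's own
# Gp.move_move/MoveIt helpers are what actually drive the arm here.
# import intera_interface
# arm = intera_interface.Limb('right')
# target = dict(zip(arm.joint_names(), drop_fruit_pos))  # joint name -> angle
# arm.move_to_joint_positions(target)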
square_list = iH.square_img_to_centers_list(img, temp_workspace)

# Keep grabbing frames until a block is detected, giving up after ~20 attempts
timeout = 0
while square_list is None:
    if timeout > 20:
        rospy.logerr("No block exists in the frame. Returning to initial position")
        exit()
    img = Gp.get_video()
    square_list = iH.square_img_to_centers_list(img, temp_workspace)
    timeout += 1

# Pick the square closest to the image center and predict a grasp on it
square_to_find = iH.find_square_closest_to_center(img, square_list)
H, W, Ang = gi.predictGraspOnImage(img, [square_to_find.getCenter().x,
                                         square_to_find.getCenter().y])

# Convert the square's pixel center to world coordinates and use the
# square's detected orientation as the grasp angle
worldVec, hom_Mtrx_c_b, rot = Gp.pixelToWorld(square_to_find.getCenterX(),
                                              square_to_find.getCenterY())
Ang = square_to_find.getAngle(square_list)
Gp.graspExecute(limb, gripper, W, H, Ang, worldVec[0], worldVec[1], 1, group,
                temp_workspace)

# Get the marked position and drop the block
if temp_workspace:
    pre_drop_pos = camera_center_human_left
else:
    pre_drop_pos = camera_center_human_right
Gp.move_move(limb, group, pre_drop_pos)
rospy.sleep(1)
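# Gp.pixelToWorld is defined elsewhere in the project; conceptually it
# back-projects the pixel through the camera intrinsics at the known table
# depth and transforms the result by the camera-to-base homogeneous matrix
# (the hom_Mtrx_c_b it returns). A rough, commented-out sketch under those
# assumptions -- K, depth and the 4x4 transform below are placeholders, not
# values taken from this project:
# import numpy as np
# def pixel_to_world(u, v, K, hom_mtrx_c_b, depth):
#     ray = np.linalg.inv(K).dot(np.array([u, v, 1.0]))  # back-project the pixel
#     p_cam = ray * (depth / ray[2])                      # scale the ray to table depth
#     p_base = hom_mtrx_c_b.dot(np.append(p_cam, 1.0))    # camera frame -> base frame
#     return p_base[:3]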