    return target_in_base


def print_detection_info(res):
    print("Detection number: ", res.size())
    print("----------------------------------")
    for r in res:
        print("classid: ", r.classid)


if __name__ == '__main__':
    """ Initialization """
    # camera and robot driver
    print('work_dir: ', _root_path)
    cam = Realsense(frame_width=1280, frame_height=720, fps=30)
    cfg = read_cfg('./Grasping_Franka/config/grasping_colorseg.yaml')
    arm = FrankaController('./Grasping_Franka/config/franka.yaml')
    cs = ComSwitch()  # gripper

    ''' Loading config '''
    initial_pose = cfg['initial_position']  # should be outside the grasping area
    # check_position = cfg['check_position']
    drop_position = cfg['drop_position']
    grasp_pre_offset = cfg['grasp_prepare_offset']
    # distance between the flange and the center of the gripper, positive value
    effector_offset = cfg['effector_offset']
    check_threshold = cfg['check_threshold']
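    # --------------------------------------------------------------------------
    # Illustrative note (not from the original file): the block above expects
    # grasping_colorseg.yaml to define at least the keys read here; a minimal
    # layout, with placeholder values, could look like the following.
    #
    #   initial_position: [...]       # pose outside the grasping area
    #   drop_position: [...]          # pose where grasped objects are released
    #   grasp_prepare_offset: ...     # pre-grasp approach offset
    #   effector_offset: ...          # flange-to-gripper-centre distance, positive
    #   check_threshold: ...
    # --------------------------------------------------------------------------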
        # Stack arrays to get transformation matrix H
        # H = np.vstack((np.hstack((R, t)), padding))
        # print("Found marker, H = ", "\n", H)
        return R, t, visualize_img
    # print("No marker found in this frame !")
    return [], [], color_frame


if __name__ == '__main__':
    from Grasping_Franka.Driver.realsense_wapper import Realsense

    cam = Realsense(frame_width=1280, frame_height=720, fps=30)
    while True:
        _, color_frame = cam.get_frame_cv()
        corners, ids = detect_ar_marker(color_frame, cam.get_intrinsics_matrix(),
                                        cam.get_distortion_coeffs())
        # print("Corners:", corners)
        # print("IDs: ", ids)
        visualize_img = color_frame
        if len(corners) > 0:
            R, t, _ = cv.aruco.estimatePoseSingleMarkers(
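            # -----------------------------------------------------------------
            # The call above is truncated in this fragment. A typical completion
            # (an assumption, not the original code) passes the detected corners,
            # the physical marker edge length, and the camera parameters, e.g.:
            #
            #   R, t, _ = cv.aruco.estimatePoseSingleMarkers(
            #       corners, marker_length, cam.get_intrinsics_matrix(),
            #       cam.get_distortion_coeffs())
            #   for rvec, tvec in zip(R, t):
            #       cv.drawFrameAxes(visualize_img, cam.get_intrinsics_matrix(),
            #                        cam.get_distortion_coeffs(), rvec, tvec, 0.05)
            #
            # where marker_length (a hypothetical name) is the marker size in metres.
            # -----------------------------------------------------------------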
    # cv2.imshow('Gray image', gray)
    # cv2.imshow('Blur', blur)
    # cv2.imshow('binary', binary)
    # cv2.imshow('bbox', img)
    # cv2.waitKey(10)
    return x, y, w, h


if __name__ == '__main__':
    """ Initialization """
    # camera and robot driver
    print('work_dir: ', _root_path)
    cam = Realsense(frame_width=1280, frame_height=720, fps=30, exposure=800,
                    white_balance=3500, contrast=80, sharpness=80)
    _, img = cam.get_frame_cv()

    # fixed crop of the working area (pixel coordinates)
    y1 = 269
    y2 = 542
    x1 = 475
    x2 = 799
    crop = img[y1:y2, x1:x2]

    # grayscale -> blur -> binary threshold -> bounding box of the bright region
    gray = cv2.cvtColor(crop, cv2.COLOR_BGR2GRAY)
    blur = cv2.blur(gray, (4, 4))
    _, binary = cv2.threshold(blur, 130, 255, cv2.THRESH_BINARY)
    x, y, w, h = cv2.boundingRect(binary)
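    # ----------------------------------------------------------------------
    # Sketch (not in the original script): draw the detected box and map it
    # back from crop coordinates to full-image coordinates by re-adding the
    # crop origin (x1, y1).
    bbox_img = img.copy()
    cv2.rectangle(bbox_img, (x1 + x, y1 + y), (x1 + x + w, y1 + y + h), (0, 255, 0), 2)
    cv2.imshow('bbox in full frame', bbox_img)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
    # ----------------------------------------------------------------------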
    plt.figure(3)
    base_T_cam = ptrans.transform_from(base_T_cam_R, base_T_cam_t, True)
    tm = TransformManager()
    tm.add_transform("base", "cam", base_T_cam)
    axis = tm.plot_frames_in("base", s=0.3)
    plt.show()


if __name__ == '__main__':
    print('work_dir: ', _root_path)
    arm = FrankaController('./Grasping_Franka/config/franka.yaml')
    cam = Realsense(frame_width=1280, frame_height=720, fps=30)
    cfg = read_cali_cfg("./Grasping_Franka/config/calibration.yaml")

    initial_pose = cfg['initial_position']
    cube_size = cfg['sample_cube_size']
    maker_size = cfg['marker_size']
    sample_repeat_num = cfg['marker_repeat_sample']
    x_step = cfg['x_stride']
    y_step = cfg['y_stride']
    z_step = cfg['z_stride']
    rotation_upper_limit = cfg['rotation_upper_limit']
    rotation_lower_limit = cfg['rotation_lower_limit']
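    # ----------------------------------------------------------------------
    # Illustrative sketch (not the repository's implementation): one common
    # way to use the values above is to sweep the end effector over a cube of
    # side sample_cube_size around the initial pose, stepping by the configured
    # strides and adding a random yaw within the rotation limits, so the marker
    # is observed from many distinct poses. Assumes initial_pose is a 6-element
    # [x, y, z, roll, pitch, yaw] list.
    import numpy as np

    sample_poses = []
    for dx in np.arange(-cube_size / 2, cube_size / 2 + 1e-9, x_step):
        for dy in np.arange(-cube_size / 2, cube_size / 2 + 1e-9, y_step):
            for dz in np.arange(-cube_size / 2, cube_size / 2 + 1e-9, z_step):
                pose = list(initial_pose)
                pose[0] += dx
                pose[1] += dy
                pose[2] += dz
                # random yaw perturbation within the configured limits (assumed index 5)
                pose[5] += np.random.uniform(rotation_lower_limit, rotation_upper_limit)
                sample_poses.append(pose)
    # ----------------------------------------------------------------------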
print('work_dir: ', _root_path)

import cv2

# from deepclaw.driver.sensors.camera.Realsense_L515 import Realsense
from Grasping_Franka.Driver.realsense_wapper import Realsense

if __name__ == '__main__':
    # camera and robot driver
    print('work_dir: ', _root_path)
    '''
    camera = Realsense('./Grasping_Franka/config/camera_rs_d435.yaml')
    frame = camera.get_frame()
    color = frame.color_image[0]
    depth_img = frame.depth_image[0]
    '''
    camera = Realsense(frame_width=1280, frame_height=720, fps=30)
    depth, color = camera.get_frame_cv()
    cv2.imshow("img", color)
    cv2.waitKey(1000)

    crop = color[178:542, 433:891]
    cv2.imshow("crop", crop)
    cv2.waitKey(10000)
    cv2.destroyAllWindows()
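    # ----------------------------------------------------------------------
    # Sketch (not in the original script): the depth frame returned alongside
    # the color image can be inspected the same way by scaling it to 8 bit and
    # applying a colormap. Assumes depth is a single-channel depth map (e.g.
    # uint16) as returned by get_frame_cv().
    depth_vis = cv2.convertScaleAbs(depth, alpha=0.03)          # scale raw depth to 0-255
    depth_vis = cv2.applyColorMap(depth_vis, cv2.COLORMAP_JET)  # false-color for readability
    cv2.imshow("depth", depth_vis)
    cv2.waitKey(1000)
    cv2.destroyAllWindows()
    # ----------------------------------------------------------------------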