def __init__(self):
    # Global state
    self.bridge = CvBridge()
    self.recognition_image = np.zeros(shape=(480, 640))
    self.depth_image = np.zeros(shape=(480, 640))
    self.bounding_boxes = BoundingBoxes()
    self.object_count = ObjectCount()

    # Camera intrinsics
    self.aligned_depth_intrin = rs.intrinsics()
    self.aligned_depth_intrin.coeffs = [0.0, 0.0, 0.0, 0.0, 0.0]
    self.aligned_depth_intrin.fx = 617.44921875
    self.aligned_depth_intrin.fy = 617.110168457
    self.aligned_depth_intrin.height = 480
    self.aligned_depth_intrin.width = 640
    self.aligned_depth_intrin.model = rs.distortion.inverse_brown_conrady
    self.aligned_depth_intrin.ppx = 328.007507324
    self.aligned_depth_intrin.ppy = 241.498748779

    # Subscribers
    rospy.Subscriber("camera/color/image_raw/compressed", CompressedImage, self.bridge_color_image)
    rospy.Subscriber('camera/depth/image_raw', Image, self.update_depth_image)
    rospy.Subscriber('darknet_ros/bounding_boxes', BoundingBoxes, self.update_bounding_boxes)
    rospy.Subscriber('darknet_ros/found_object', ObjectCount, self.update_object_count)

    # Publishers
    self.pedestrian_info_pub = rospy.Publisher("recognition/pedestrian_infos", PedestrianInfos, queue_size=1)
def image_depth_info_cb(self, camera_info):
    """ROS Callback for image_depth_info

    Arguments:
        camera_info -- message of type sensor_msgs.CameraInfo
    """
    try:
        if self.intrinsics:
            return
        self.intrinsics = rs2.intrinsics()
        self.intrinsics.width = camera_info.width
        self.intrinsics.height = camera_info.height
        self.intrinsics.ppx = camera_info.K[2]
        self.intrinsics.ppy = camera_info.K[5]
        self.intrinsics.fx = camera_info.K[0]
        self.intrinsics.fy = camera_info.K[4]
        if camera_info.distortion_model == "plumb_bob":
            self.intrinsics.model = rs2.distortion.brown_conrady
        elif camera_info.distortion_model == "equidistant":
            self.intrinsics.model = rs2.distortion.kannala_brandt4
        print(self.intrinsics)
        self.intrinsics.coeffs = [i for i in camera_info.D]
    except CvBridgeError as e:
        print(e)
        return
def __init__(self, address):
    asyncore.dispatcher.__init__(self)
    print("Launching Realsense Camera Server")
    self.pipeline, self.sensor = open_pipeline()
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    print('sending acknowledgement to', address)
    # reduce the resolution of the depth image using post processing
    self.decimate_filter = rs.decimation_filter()
    self.decimate_filter.set_option(rs.option.filter_magnitude, 2)
    self.frame_data = ''
    self.frame_length = 0
    self.connect((MC_IP_ADDRESS, PORT))
    self.packet_id = 0
    self.remaining_bytes = 0
    self.buffer = bytearray()
    self.color_image = np.array([])
    self.depth_image = np.array([])
    self.depth_scale = 0
    self.aligned_frames = []
    self.intrinsics = rs.intrinsics()
    # yolov3 detection result
    boxes = dict()
    boxes['detection_classes'] = []
    boxes['detection_boxes'] = []
    boxes['detection_scores'] = []
    boxes['bgr_img'] = []
    self.boxes = boxes
def camera_intrinsics_from_camera_info(self, camera_info):
    """Syncs the camera intrinsics.

    Copies the information from a CameraInfo ROS message to a local
    rs2.intrinsics(). This enables the calculation of x and y coordinates.

    Parameters
    ----------
    camera_info : CameraInfo (sensor_msgs.msg)
        The camera's intrinsics and extrinsics
    """
    try:
        # https://github.com/IntelRealSense/realsense-ros/issues/1342
        self.intrinsics = rs2.intrinsics()
        self.intrinsics.width = camera_info.width
        self.intrinsics.height = camera_info.height
        self.intrinsics.ppx = camera_info.K[2]
        self.intrinsics.ppy = camera_info.K[5]
        self.intrinsics.fx = camera_info.K[0]
        self.intrinsics.fy = camera_info.K[4]
        if camera_info.distortion_model == "plumb_bob":
            self.intrinsics.model = rs2.distortion.brown_conrady
        elif camera_info.distortion_model == "equidistant":
            self.intrinsics.model = rs2.distortion.kannala_brandt4
        self.intrinsics.coeffs = [i for i in camera_info.D]
        if self.verbose:
            rospy.loginfo("Camera intrinsics:\n" + str(self.intrinsics))
            """
            With the 435i it should be something like:
            640x480  p[319.199 242.08]  f[614.686 614.842]  Brown Conrady [0 0 0 0 0]
            """
    except CvBridgeError as err:
        rospy.logerr(err)
def create_pose(self, x, y, angle, dist):
    cameraInfo = cam.depth_intrinsics
    _intrinsics = rs.intrinsics()
    _intrinsics.width = cameraInfo.width
    _intrinsics.height = cameraInfo.height
    _intrinsics.ppx = cameraInfo.ppx
    _intrinsics.ppy = cameraInfo.ppy
    _intrinsics.fx = cameraInfo.fx
    _intrinsics.fy = cameraInfo.fy
    ###_intrinsics.model = cameraInfo.distortion_model
    #_intrinsics.model = rs.distortion.none
    #_intrinsics.coeffs = [i for i in cameraInfo.D]
    result = rs.rs2_deproject_pixel_to_point(cameraInfo, [x, y], dist)
    p = Pose()
    p.position.x = result[2]
    p.position.y = -result[0]
    p.position.z = -result[1]
    # Make sure the quaternion is valid and normalized
    angle = radians(angle + 45)
    p.orientation.x = sin(angle / 2) * 1
    p.orientation.y = sin(angle / 2) * 0
    p.orientation.z = sin(angle / 2) * 0
    p.orientation.w = cos(angle / 2)
    return p
def read_intrinsics(file_path):
    intrinsics = rs2.intrinsics()
    intrinsics_file = open(file_path, "r")
    intr_string = intrinsics_file.read()

    # extract values from file
    intr_value = intr_string[intr_string.find("width:"):]
    intrinsics.width = int(intr_value[intr_value.find(":") + 2:intr_value.find(",")])
    intr_value = intr_string[intr_string.find("height:"):]
    intrinsics.height = int(intr_value[intr_value.find(":") + 2:intr_value.find(",")])
    intr_value = intr_string[intr_string.find("ppx:"):]
    intrinsics.ppx = float(intr_value[intr_value.find(":") + 2:intr_value.find(",")])
    intr_value = intr_string[intr_string.find("ppy:"):]
    intrinsics.ppy = float(intr_value[intr_value.find(":") + 2:intr_value.find(",")])
    intr_value = intr_string[intr_string.find("fx:"):]
    intrinsics.fx = float(intr_value[intr_value.find(":") + 2:intr_value.find(",")])
    intr_value = intr_string[intr_string.find("fy:"):]
    intrinsics.fy = float(intr_value[intr_value.find(":") + 2:intr_value.find(",")])

    intr_value = intr_string[intr_string.find("model:"):]
    intr_model = intr_value[intr_value.find(":") + 2:intr_value.find(",")]
    if intr_model == "None":
        pass
    if intr_model == "Brown Conrady":
        intrinsics.model = rs2.distortion.brown_conrady
    elif intr_model == "Modified Brown Conrady":
        intrinsics.model = rs2.distortion.modified_brown_conrady
    elif intr_model == "Inverse Brown Conrady":
        intrinsics.model = rs2.distortion.inverse_brown_conrady

    intr_value = intr_string[intr_string.find("coeffs:"):]
    intr_value = intr_value[intr_value.find("[") + 1:intr_value.find("]")]
    count = sum(chars.count(",") for chars in intr_value)
    intr_coeff = []
    for i in range(5):
        # `find(",") is not -1` tested object identity; compare the value instead
        if intr_value.find(",") != -1:
            print(i)
            print(intr_value)
            intr_coeff.append(float(intr_value[:intr_value.find(",")]))
            intr_value = intr_value[intr_value.find(",") + 2:]
        else:
            intr_coeff.append(float(intr_value))
    print(intr_coeff)
    intrinsics.coeffs = intr_coeff
    return intrinsics
def dict_to_intrinsics(d: dict):
    intrinsics = rs.intrinsics()
    intrinsics.ppx = d['ppx']
    intrinsics.ppy = d['ppy']
    intrinsics.fx = d['fx']
    intrinsics.fy = d['fy']
    intrinsics.width = d['width']
    intrinsics.height = d['height']
    intrinsics.model = rs.distortion(d['model'])
    intrinsics.coeffs = d['coeffs']
    return intrinsics
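A minimal usage sketch for dict_to_intrinsics above (not part of the original source): the keys and values are illustrative placeholders, and 'model' is assumed to hold the integer value of an rs.distortion enum member so that rs.distortion(d['model']) can reconstruct it.

import pyrealsense2 as rs

# Hypothetical serialized intrinsics; all numbers are placeholders.
sample = {
    'width': 640, 'height': 480,
    'ppx': 319.5, 'ppy': 239.5,
    'fx': 615.0, 'fy': 615.0,
    'model': int(rs.distortion.brown_conrady),  # stored as the enum's integer value
    'coeffs': [0.0, 0.0, 0.0, 0.0, 0.0],
}

intrinsics = dict_to_intrinsics(sample)
print(intrinsics)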
def intrin_callback(self, data):
    #print("intrin_callback")
    self.intrin = rs.intrinsics()
    self.intrin.width = data.width
    self.intrin.height = data.height
    self.intrin.ppx = data.K[2]
    self.intrin.ppy = data.K[5]
    self.intrin.fx = data.K[0]
    self.intrin.fy = data.K[4]
    # rs.intrinsics.model expects an rs.distortion enum, not the raw
    # distortion_model string from CameraInfo, so map the common case.
    if data.distortion_model == 'plumb_bob':
        self.intrin.model = rs.distortion.brown_conrady
    else:
        self.intrin.model = rs.distortion.none
    self.intrin.coeffs = [0, 0, 0, 0, 0]
def convert_depth_to_phys_coord_using_realsense(x, y, depth):
    _intrinsics = rs.intrinsics()
    _intrinsics.width = 1920  # cameraInfo.width
    _intrinsics.height = 1080  # cameraInfo.height
    _intrinsics.ppx = 654.888000488281  # cameraInfo.K[2]
    _intrinsics.ppy = 368.117980957031  # cameraInfo.K[5]
    _intrinsics.fx = 912.651000976562  # cameraInfo.K[0]
    _intrinsics.fy = 911.876037597656  # cameraInfo.K[4]
    #_intrinsics.model = cameraInfo.distortion_model
    _intrinsics.model = rs.distortion.inverse_brown_conrady
    _intrinsics.coeffs = [0, 0, 0, 0, 0]  # [i for i in cameraInfo.D]
    result = rs.rs2_deproject_pixel_to_point(_intrinsics, [x, y], depth)
    # result[0]: right, result[1]: down, result[2]: forward
    return result[2], -result[0], -result[1]
def convert_depth_to_phys_coord_using_realsense(x, y, depth, cameraInfo):
    _intrinsics = pyrealsense2.intrinsics()
    _intrinsics.width = cameraInfo.width
    _intrinsics.height = cameraInfo.height
    _intrinsics.ppx = cameraInfo.K[2]
    _intrinsics.ppy = cameraInfo.K[5]
    _intrinsics.fx = cameraInfo.K[0]
    _intrinsics.fy = cameraInfo.K[4]
    #_intrinsics.model = cameraInfo.distortion_model
    _intrinsics.model = pyrealsense2.distortion.none
    _intrinsics.coeffs = [i for i in cameraInfo.D]
    result = pyrealsense2.rs2_deproject_pixel_to_point(_intrinsics, [x, y], depth)
    # result[0]: right, result[1]: down, result[2]: forward
    #return result[0], result[1], result[2]
    return result[2], -result[0], -result[1]
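A usage sketch for the CameraInfo-based variant above, assuming the standard realsense-ros aligned-depth topics and a 16-bit depth image in millimetres (the topic names and the 0.001 depth scale are assumptions, not taken from the original source):

import rospy
import pyrealsense2
from sensor_msgs.msg import CameraInfo, Image
from cv_bridge import CvBridge

rospy.init_node("deproject_example")
bridge = CvBridge()

# One-shot reads for illustration; a real node would use subscribers.
camera_info = rospy.wait_for_message("/camera/aligned_depth_to_color/camera_info", CameraInfo)
depth_msg = rospy.wait_for_message("/camera/aligned_depth_to_color/image_raw", Image)
depth_image = bridge.imgmsg_to_cv2(depth_msg, desired_encoding="passthrough")

u, v = 320, 240  # pixel of interest
depth_m = depth_image[v, u] * 0.001  # assumed scale: 16-bit depth in mm -> metres
x, y, z = convert_depth_to_phys_coord_using_realsense(u, v, depth_m, camera_info)
print(x, y, z)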
def intrin_callback(self, cameraInfo):
    """
    D435 camera intrinsic values can be derived from the CameraInfo ROS message.
    """
    if not self.intrin_retrieved:
        self.intrin = rs.intrinsics()
        self.intrin.width = cameraInfo.width
        self.intrin.height = cameraInfo.height
        self.intrin.ppx = cameraInfo.K[2]
        self.intrin.ppy = cameraInfo.K[5]
        self.intrin.fx = cameraInfo.K[0]
        self.intrin.fy = cameraInfo.K[4]
        #self.intrin.model = cameraInfo.distortion_model
        self.intrin.model = rs.distortion.none
        self.intrin.coeffs = [i for i in cameraInfo.D]
        self.intrin_retrieved = True
def camera_info2rs_intrinsics(camera_info):
    # init object
    rs_intrinsics = rs.intrinsics()

    # dimensions
    rs_intrinsics.width = camera_info.width
    rs_intrinsics.height = camera_info.height

    # principal point coordinates
    rs_intrinsics.ppx = camera_info.K[2]
    rs_intrinsics.ppy = camera_info.K[5]

    # focal lengths (in pixels); model and coeffs are left at their defaults
    rs_intrinsics.fx = camera_info.K[0]
    rs_intrinsics.fy = camera_info.K[4]

    return rs_intrinsics
def imageColorInfoCallback(self, cameraInfo):
    try:
        if self.color_intrinsics:
            return
        self.color_intrinsics = rs2.intrinsics()
        self.color_intrinsics.width = cameraInfo.width
        self.color_intrinsics.height = cameraInfo.height
        self.color_intrinsics.ppx = cameraInfo.K[2]
        self.color_intrinsics.ppy = cameraInfo.K[5]
        self.color_intrinsics.fx = cameraInfo.K[0]
        self.color_intrinsics.fy = cameraInfo.K[4]
        if cameraInfo.distortion_model == 'plumb_bob':
            self.color_intrinsics.model = rs2.distortion.brown_conrady
        elif cameraInfo.distortion_model == 'equidistant':
            self.color_intrinsics.model = rs2.distortion.kannala_brandt4
        self.color_intrinsics.coeffs = [i for i in cameraInfo.D]
    except Exception as e:
        print(e)
        rospy.logerr('Error on color info callback')
def imageDepthInfoCallback(self, cameraInfo):
    try:
        if self.intrinsics:
            return
        self.intrinsics = rs2.intrinsics()
        self.intrinsics.width = cameraInfo.width
        self.intrinsics.height = cameraInfo.height
        self.intrinsics.ppx = cameraInfo.K[2]
        self.intrinsics.ppy = cameraInfo.K[5]
        self.intrinsics.fx = cameraInfo.K[0]
        self.intrinsics.fy = cameraInfo.K[4]
        if cameraInfo.distortion_model == 'plumb_bob':
            self.intrinsics.model = rs2.distortion.brown_conrady
        elif cameraInfo.distortion_model == 'equidistant':
            self.intrinsics.model = rs2.distortion.kannala_brandt4
        self.intrinsics.coeffs = [i for i in cameraInfo.D]
    except CvBridgeError as e:
        print(e)
        return
def load_intrinsics(filename):
    depth_scale = 1.0
    intrin = rs.intrinsics()
    if exists(filename):
        with open(filename) as infile:
            data = json.load(infile)
            depth_scale = data['depth_scale']
            intrin.width = data['width']
            intrin.height = data['height']
            intrin.ppx = data['ppx']
            intrin.ppy = data['ppy']
            intrin.fx = data['fx']
            intrin.fy = data['fy']
            if data['model'] == '%s' % (rs.distortion.brown_conrady,):
                # was `intrin.model == ...`, a no-op comparison; assign instead
                intrin.model = rs.distortion.brown_conrady
            intrin.coeffs = data['coeffs']
    return intrin, depth_scale
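For symmetry, a hedged sketch of a matching writer (save_intrinsics is a hypothetical helper, not in the original source); it emits the same JSON keys that load_intrinsics reads and stores the model as its string form so the '%s' comparison in the loader still matches:

import json
import pyrealsense2 as rs

def save_intrinsics(filename, intrin, depth_scale):
    # Keys mirror what load_intrinsics() expects.
    data = {
        'depth_scale': depth_scale,
        'width': intrin.width,
        'height': intrin.height,
        'ppx': intrin.ppx,
        'ppy': intrin.ppy,
        'fx': intrin.fx,
        'fy': intrin.fy,
        'model': '%s' % (intrin.model,),  # e.g. the string form of rs.distortion.brown_conrady
        'coeffs': list(intrin.coeffs),
    }
    with open(filename, 'w') as outfile:
        json.dump(data, outfile)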
def intrin(resolution, pp, f, model, coeffs):
    """
    Makes pseudo-intrinsics for the realsense camera used.

    Arguments:
        resolution: Camera resolution (tuple)
        pp: principal point in pixels (tuple)(x, y)
        f: focal lengths in pixels (tuple)(x, y)
        model: distortion model to use (rs.distortion)
        coeffs: distortion coeffs
    """
    a = rs.intrinsics()
    a.width = max(resolution)
    a.height = min(resolution)
    a.ppx = pp[0]
    a.ppy = pp[1]
    a.fx = f[0]
    a.fy = f[1]
    a.coeffs = coeffs
    a.model = model
    return a
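A short usage sketch of intrin() above; the numbers are illustrative placeholders, not calibration data for any particular device:

import pyrealsense2 as rs

# Build pseudo-intrinsics and deproject the image centre at a depth of 1 m.
pseudo = intrin(
    resolution=(640, 480),
    pp=(320.0, 240.0),
    f=(615.0, 615.0),
    model=rs.distortion.brown_conrady,
    coeffs=[0.0, 0.0, 0.0, 0.0, 0.0],
)
point = rs.rs2_deproject_pixel_to_point(pseudo, [320, 240], 1.0)
print(point)  # [x, y, z] in metres, camera frame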
def __init__(self, root, serial_number, enable_depth=True, enable_color=True, enable_ptcld=False):
    # Input the root path to the folder containing the data
    self.root = root
    self.serial_number = serial_number

    if enable_depth:
        self.depth_filenames = os.listdir(root+serial_number+'/depth')  # Get the filenames of all png
    if enable_color:
        self.color_filenames = os.listdir(root+serial_number+'/color')  # Get the filenames of all png
    if enable_ptcld:
        self.ptcld_filenames = os.listdir(root+serial_number)  # Get the filenames of all pcd

    self.max_index = 1e1000  # Set to a large number (float inf) first
    if enable_depth:
        self.max_depth_index = len(self.depth_filenames)
        self.max_index = min(self.max_index, self.max_depth_index)
    if enable_color:
        self.max_color_index = len(self.color_filenames)
        self.max_index = min(self.max_index, self.max_color_index)
    if enable_ptcld:
        self.max_ptcld_index = len(self.ptcld_filenames)
        self.max_index = min(self.max_index, self.max_ptcld_index)
    print('max_index', self.max_index)

    # Take a look at the first image to get the width and height of the image
    if enable_depth:
        img = cv2.imread(root+serial_number+'/depth/'+self.depth_filenames[0], cv2.IMREAD_ANYDEPTH)
    if enable_color:
        img = cv2.imread(root+serial_number+'/color/'+self.color_filenames[0])
    if enable_ptcld:
        img = np.zeros((480, 640))  # Note: hardcoded default value
    self.img_height, self.img_width = img.shape[0], img.shape[1]

    # Extract back the camera parameters.
    # Must construct a fresh rs.intrinsics() here, otherwise the parameters would be
    # shared when multiple instances of this class are created.
    self.camera_intrinsics = rs.intrinsics()
    params = yaml.load(
        open('data/camera_params_' + str(self.img_width) + 'x' + str(self.img_height) + '_' + self.serial_number + '.yaml'),
        Loader=yaml.FullLoader)
    self.depth_scale = params['depth_scale']
    self.clipping_distance_in_meters = params['clipping_distance_in_meters']
    # self.camera_intrinsics.width = params['depth_camera_intrinsics']['width']
    # self.camera_intrinsics.height = params['depth_camera_intrinsics']['height']
    # self.camera_intrinsics.ppx = params['depth_camera_intrinsics']['ppx']
    # self.camera_intrinsics.ppy = params['depth_camera_intrinsics']['ppy']
    # self.camera_intrinsics.fx = params['depth_camera_intrinsics']['fx']
    # self.camera_intrinsics.fy = params['depth_camera_intrinsics']['fy']
    self.camera_intrinsics.width = params['color_camera_intrinsics']['width']
    self.camera_intrinsics.height = params['color_camera_intrinsics']['height']
    self.camera_intrinsics.ppx = params['color_camera_intrinsics']['ppx']
    self.camera_intrinsics.ppy = params['color_camera_intrinsics']['ppy']
    self.camera_intrinsics.fx = params['color_camera_intrinsics']['fx']
    self.camera_intrinsics.fy = params['color_camera_intrinsics']['fy']
def main():
    rospy.init_node("ObjectDetector")
    rospy.on_shutdown(callback_shutting_down)
    global publisher
    publisher = rospy.Publisher("objects_detected", ObjectsDetected, queue_size=RATE)
    rospy.loginfo("*Node ObjectDetector started*")

    global cv_bridge, rater, obj_det_process
    cv_bridge = CvBridge()
    rater = rospy.Rate(RATE)
    obj_det_process = init_obj_det_process()
    if obj_det_process == None:
        rospy.loginfo("*ERROR: Process could not be opened, closing.*")
        exit()

    seconds = int(1 / RATE)
    timer_analyze = rospy.Timer(
        rospy.Duration(seconds, int(1000000000 * (1.0 / RATE - seconds))),
        callback_timer_analyze_msg_image)

    # `queue_size=1` to always get the last one.
    if USE_RS_CAMERA:
        # Wait for the depth camera intrinsics.
        # TODO: This isn't ideal: it blocks startup and won't pick up a changed camera config.
        rospy.loginfo("Waiting to receive camera info topic...")
        depth_info = rospy.wait_for_message("/camera/depth/camera_info", CameraInfo)
        global rs_intrinsics
        # IntelRealSense/realsense-ros: realsense2_camera/src/base_realsense_node.cpp::BaseRealSenseNode::updateStreamCalibData()
        rs_intrinsics = rs.intrinsics()
        rs_intrinsics.height = depth_info.height
        rs_intrinsics.width = depth_info.width
        rs_intrinsics.ppx = depth_info.K[2]
        rs_intrinsics.ppy = depth_info.K[5]
        rs_intrinsics.fx = depth_info.K[0]
        rs_intrinsics.fy = depth_info.K[4]
        # rs_intrinsics.coeffs = []
        for i in range(len(depth_info.D)):
            rs_intrinsics.coeffs.append(depth_info.D[i])
        # This is hardcoded as (at this point) it isn't correctly set. Check updateStreamCalibData()
        rs_intrinsics.model = rs.distortion.inverse_brown_conrady

        # TODO: Switching to `image_transport` instead of the raw topic could be a good idea.
        rospy.Subscriber("/camera/color/image_raw", Image, callback_topic_img, queue_size=1)
        rospy.Subscriber("/camera/aligned_depth_to_color/image_raw", Image, callback_topic_depth, queue_size=1)
    else:
        rospy.Subscriber("frames", Image, callback_topic_img, queue_size=1)

    rospy.loginfo("*ObjectDetection module ready to callback*")
    rospy.spin()
    rospy.loginfo("*Finishing node*")
    close_obj_det_process()
    timer_analyze.shutdown()
    cv2.destroyAllWindows()
    rospy.loginfo("*Node finished*")
    mean_depth = 0
    for coords in hole.points:
        print(mean_depth)
        new_depth[coords[0], coords[1]] = mean_depth
    return new_depth

process = True
_distance = 0.46  # ground distance or max distance
_seedlingheight = 0.28  # minimum distance

# set depth camera parameters
distance = 0.359
depth_scale = 9.999999747378752e-05
intrinsics = rs.intrinsics()
intrinsics.width = 1280
intrinsics.height = 720
intrinsics.ppx = 639.399
intrinsics.ppy = 350.551
intrinsics.fx = 906.286
intrinsics.fy = 905.369
intrinsics.model = rs.distortion.inverse_brown_conrady
intrinsics.coeffs = [0.0, 0.0, 0.0, 0.0, 0.0]

def colorize(depth_image):
    d_image = depth_image.astype(np.float64)  # np.float alias was removed in newer NumPy
    _min = _seedlingheight  # _distance - _seedlingheight
    _max = _distance
    normalized = 255.0 * (d_image - _min) / (_max - _min)
def __init__(self):
    #self.image_pub = rospy.Publisher("image_topic_2",Image)
    rospy.init_node('targetVisualServoing', anonymous=True)
    targName = rospy.get_param('~targetName', 'steelStand')
    self.bridge = CvBridge()
    self.rgb_sub = rospy.Subscriber("cam_d1/color/image_raw", Image, self.rgbCallback)
    init_data = rospy.wait_for_message("cam_d1/color/image_raw", Image)
    self.rgbCallback(init_data)
    self.depth_sub = rospy.Subscriber("cam_d1/aligned_depth_to_color/image_raw", Image, self.grayCallback)
    init_data = rospy.wait_for_message("cam_d1/aligned_depth_to_color/image_raw", Image)
    self.grayCallback(init_data)

    # depth camera info for transformation
    #self.info_sub = rospy.Subscriber("/cam_d1/aligned_depth_to_color/camera_info", CameraInfo, self.imageDepthInfoCallback)
    init_data = rospy.wait_for_message("/cam_d1/aligned_depth_to_color/camera_info", CameraInfo)
    self.intrinsics = rs2.intrinsics()
    self.intrinsics.width = init_data.width
    self.intrinsics.height = init_data.height
    self.intrinsics.ppx = init_data.K[2]
    self.intrinsics.ppy = init_data.K[5]
    self.intrinsics.fx = init_data.K[0]
    self.intrinsics.fy = init_data.K[4]
    if init_data.distortion_model == 'plumb_bob':
        self.intrinsics.model = rs2.distortion.brown_conrady
    elif init_data.distortion_model == 'equidistant':
        self.intrinsics.model = rs2.distortion.kannala_brandt4
    self.intrinsics.coeffs = [i for i in init_data.D]

    #self.pc_sub = rospy.Subscriber("cam_d1/depth/color/points",PointCloud2,self.ptCloudCallback)
    #init_data = rospy.wait_for_message("cam_d1/depth/color/points",PointCloud2)
    #self.ptCloudCallback(init_data)

    self.vel_pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1)

    # control
    self.kp = [0.5, 0.5, 2.0]
    self.kd = [0.025, 0.025, 0.02]
    self.vlim = [0.1, 0.1, 0.1]
    self.errX = 0.
    self.errY = 0.
    self.errAng = 0.
    self.errXOld = 0.
    self.errYOld = 0.
    self.errAngOld = 0.
    # target detection
    #self.featureExtractor = cv2.AKAZE_create()
    #self.featureExtractor = cv2.xfeatures2d.SIFT_create()
    self.featureExtractor = cv2.xfeatures2d.SURF_create(400)
    #self.featureExtractor = cv2.ORB_create()
    #index_params = dict(algorithm = 6,  # FLANN_INDEX_LSH
    #                    table_number = 12,  # 6
    #                    key_size = 20,  # 12
    #                    multi_probe_level = 2)  # 1
    index_params = dict(algorithm=0,  # FLANN_INDEX_KDTREE
                        trees=5)
    search_params = dict(checks=50)
    self.matcher = cv2.FlannBasedMatcher(index_params, search_params)
    #self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
    self.matchFlag = False

    # load target features
    im_target = cv2.imread('/home/cartman/Dev/Mobile_Manipulation_Dev/src/pcv_base/resources/visualTargets/' + targName + '.jpg')
    (rows, cols, channels) = im_target.shape
    #scale = 0.1
    #im_target = cv2.resize(im_target, (int(cols*scale), int(rows*scale)), interpolation = cv2.INTER_CUBIC)
    self.im_target = cv2.normalize(im_target, im_target, 0, 255, norm_type=cv2.NORM_MINMAX)
    self.kp_tgt, self.des_tgt = self.featureExtractor.detectAndCompute(self.im_target, None)
    h, w, c = self.im_target.shape
    self.tgt_coords = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    self.tgt_area = h * w

    # target position (of target object from robot base_link)
    tgtX = rospy.get_param('~desX', 1.8)
    tgtY = rospy.get_param('~desY', -0.1)
    self.pos_tgt = [tgtX, tgtY]
    self.ang_tgt = rospy.get_param('~desTh', 0.0)
    ca = np.cos(self.ang_tgt)
    sa = np.sin(self.ang_tgt)
    # tgtToObj ^ -1 = R^T * (T^-1)
    self.objToTgt = np.matmul(np.array([[ca, sa, 0.], [-sa, ca, 0.], [0., 0., 1.]]),
                              np.array([[1., 0., -self.pos_tgt[0]], [0., 1., -self.pos_tgt[1]], [0., 0., 1.]]))
    self.tfBuffer = tf2_ros.Buffer()
    self.tfListener = tf2_ros.TransformListener(self.tfBuffer)

    # target tracking
    self.trackerObj = cv2.TrackerKCF_create
    self.track_window = (0, 0, 0, 0)
    # tracking loop termination criteria: 5 iters max or target moved 1 pixel
    self.track_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 5, 1)
    self.rate = 10
    self.rosRate = rospy.Rate(self.rate)
        and camera_resolution['depth-h'] == 360):
    setting_isOK = True
    ppx = 321.3486328125
    ppy = 179.70550537109375
    fx = 461.9239807128906
    fy = 462.3507080078125

if (not setting_isOK):
    print('*** Please set the correct intrinsics')
    sys.exit()

############################
# get them from realsense api
############################
depth_scale = 0.0010000000474974513  # seems to be the same in any resolution
depth_intrin = rs.intrinsics()
depth_intrin.width = rs_image_rgb.shape[1]
depth_intrin.height = rs_image_rgb.shape[0]
depth_intrin.coeffs = [0, 0, 0, 0, 0]
depth_intrin.ppx = ppx
depth_intrin.ppy = ppy
depth_intrin.fx = fx
depth_intrin.fy = fy
depth_intrin.model = rs.distortion.brown_conrady

# target detect args
enable_image_show = True
enable_fps_show = True

def applyFilter(img, Lmask_MIN, Lmask_MAX, Umask_MIN, Umask_MAX):
def __init__(self):
    #self.image_pub = rospy.Publisher("image_topic_2",Image)
    rospy.init_node('targetVisualServoing', anonymous=True)
    self.refL = rospy.get_param('~refIDLeft', 801)
    self.refR = rospy.get_param('~refIDRight', 802)
    arucoType = rospy.get_param('~codeType', 'DICT_4X4_1000')
    self.bridge = CvBridge()
    self.rgb_sub = rospy.Subscriber("cam_d1/color/image_raw", Image, self.rgbCallback)
    init_data = rospy.wait_for_message("cam_d1/color/image_raw", Image)
    self.rgbCallback(init_data)
    self.depth_sub = rospy.Subscriber("cam_d1/aligned_depth_to_color/image_raw", Image, self.grayCallback)
    init_data = rospy.wait_for_message("cam_d1/aligned_depth_to_color/image_raw", Image)
    self.grayCallback(init_data)

    # depth camera info for transformation
    #self.info_sub = rospy.Subscriber("/cam_d1/aligned_depth_to_color/camera_info", CameraInfo, self.imageDepthInfoCallback)
    init_data = rospy.wait_for_message("/cam_d1/aligned_depth_to_color/camera_info", CameraInfo)
    self.intrinsics = rs2.intrinsics()
    self.intrinsics.width = init_data.width
    self.intrinsics.height = init_data.height
    self.intrinsics.ppx = init_data.K[2]
    self.intrinsics.ppy = init_data.K[5]
    self.intrinsics.fx = init_data.K[0]
    self.intrinsics.fy = init_data.K[4]
    if init_data.distortion_model == 'plumb_bob':
        self.intrinsics.model = rs2.distortion.brown_conrady
    elif init_data.distortion_model == 'equidistant':
        self.intrinsics.model = rs2.distortion.kannala_brandt4
    self.intrinsics.coeffs = [i for i in init_data.D]

    #self.pc_sub = rospy.Subscriber("cam_d1/depth/color/points",PointCloud2,self.ptCloudCallback)
    #init_data = rospy.wait_for_message("cam_d1/depth/color/points",PointCloud2)
    #self.ptCloudCallback(init_data)

    self.vel_pub = rospy.Publisher('/mobile_base_controller/cmd_vel', Twist, queue_size=1)

    # control
    self.kp = [1.0, 1.0, 1.0]
    self.kd = [0.05, 0.05, 0.05]
    self.vlim = [0.1, 0.1, 0.1]
    self.errX = 0.
    self.errY = 0.
    self.errAng = 0.
    self.errXOld = 0.
    self.errYOld = 0.
    self.errAngOld = 0.
    ARUCO_DICT = {
        "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
        "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
        "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
        "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
        "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
        "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
        "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
        "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
        "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
        "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
        "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
        "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
        "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
        "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
        "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
        "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
        "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL
        #"DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
        #"DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
        #"DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
        #"DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
    }

    # target detection
    self.arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[arucoType])
    self.arucoParams = cv2.aruco.DetectorParameters_create()
    self.matchFlag = False
    self.controlValid = False

    # target position (of the middle point between ID1 and ID2, from robot base_link)
    tgtX = rospy.get_param('~desX', 1.85)
    tgtY = rospy.get_param('~desY', -0.05)
    self.pos_tgt = [tgtX, tgtY]
    self.ang_tgt = rospy.get_param('~desTh', 0.0)
    self.relPosL = np.array([0., 0.])
    self.relPosR = np.array([0., 0.])
    ca = np.cos(self.ang_tgt)
    sa = np.sin(self.ang_tgt)
    # tgtToObj ^ -1 = R^T * (T^-1)
    self.objToTgt = np.matmul(np.array([[ca, sa, 0.], [-sa, ca, 0.], [0., 0., 1.]]),
                              np.array([[1., 0., -self.pos_tgt[0]], [0., 1., -self.pos_tgt[1]], [0., 0., 1.]]))
    self.tfBuffer = tf2_ros.Buffer()
    self.tfListener = tf2_ros.TransformListener(self.tfBuffer)

    # target tracking
    self.rate = 10
    self.rosRate = rospy.Rate(self.rate)
def get_view_distribution(data_path, d, n, n_viewpoints, plot=False, l_arrow=30, reference_point=np.array([0, 0, 0])):
    points = []
    arrows = []
    for idx in range(n):
        with open(os.path.join(data_path, d, '{:06d}.meta.json'.format(idx))) as f:
            meta = json.load(f)
        robotEndEff2Cam = np.array(meta.get('hand_eye_calibration')).reshape(4, 4)
        intr = meta.get('intr')
        robot2endEff_tf = meta.get('robot2endEff_tf')
        robot2endEff_tf = np.array(robot2endEff_tf).reshape((4, 4))

        depth_intrinsic = rs.intrinsics()
        depth_intrinsic.width = intr.get('width')
        depth_intrinsic.height = intr.get('height')
        depth_intrinsic.ppx = intr.get('ppx')
        depth_intrinsic.ppy = intr.get('ppy')
        depth_intrinsic.fx = intr.get('fx')
        depth_intrinsic.fy = intr.get('fy')
        depth_intrinsic.model = rs.distortion.inverse_brown_conrady
        depth_intrinsic.coeffs = intr.get('coeffs')

        robot2Cam_ft = np.dot(robot2endEff_tf, robotEndEff2Cam)
        points.append(list(robot2Cam_ft[:3, 3]))
        next_arrow = []
        for i in range(3):
            new_arrow = np.identity(4)
            new_arrow[i, 3] = l_arrow
            new_arrow = np.dot(robot2Cam_ft, new_arrow)
            next_arrow.append(new_arrow[:3, 3])
        arrows.append(next_arrow)

    points = np.array(points)
    point_cloud = o3d.geometry.PointCloud()
    point_cloud.points = o3d.utility.Vector3dVector(points)

    # use the smallest pairwise distance between camera positions as the initial voxel size
    voxel_size = np.inf
    for i, p1 in enumerate(points):
        for j, p2 in enumerate(points):
            if i != j:
                dist = int(np.linalg.norm(p2 - p1))
                if dist < voxel_size:
                    voxel_size = dist
            if voxel_size == 0:
                break
        if voxel_size == 0:
            break

    # adjust the voxel size until downsampling yields exactly n_viewpoints points
    while True:
        c_point_cloud = copy.deepcopy(point_cloud)
        c_point_cloud = c_point_cloud.voxel_down_sample(voxel_size=voxel_size)
        l = len(np.array(c_point_cloud.points))
        if l == n_viewpoints:
            selected_points = np.array(c_point_cloud.points)
            break
        elif l < n_viewpoints:
            voxel_size -= 1
            c_point_cloud = copy.deepcopy(point_cloud)
            c_point_cloud = c_point_cloud.voxel_down_sample(voxel_size=voxel_size)
            selected_points = np.random.choice(np.arange(len(np.array(c_point_cloud.points))), replace=False, size=n_viewpoints)
            selected_points = np.array(c_point_cloud.points)[selected_points]
            break
        else:
            voxel_size += 1

    # map the downsampled points back to indices of the original viewpoints
    selection = []
    for i, p1 in enumerate(selected_points):
        min_dist = np.inf
        min_dist_index = 0
        for j, p2 in enumerate(points):
            dist = np.linalg.norm(p2 - p1)
            if dist < min_dist:
                min_dist = dist
                min_dist_index = j
        selection.append(min_dist_index)
    points = points[selection]

    # order the selected viewpoints as a nearest-neighbour chain starting closest to the origin
    new_selection = []
    new_selection.append(np.argmin(np.linalg.norm(points, axis=1)))
    while len(new_selection) != n_viewpoints:
        min_dist = np.inf
        min_dist_index = 0
        for j, p in enumerate(points):
            if j not in new_selection:
                dist = np.linalg.norm(p - points[new_selection[-1]])
                if dist < min_dist:
                    min_dist = dist
                    min_dist_index = j
        new_selection.append(min_dist_index)
    selection = np.array(selection)
    new_selection = selection[new_selection]

    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        robot_home = [0.46051215031184256, -193.22331249713898, 1000.9851455688477]
        ax.scatter(robot_home[0], robot_home[1], robot_home[2], c='g', marker='^')
        ax.scatter(reference_point[0], reference_point[1], reference_point[2], c='b', marker='o')
        vXs = points[:, 0]
        vYs = points[:, 1]
        vZs = points[:, 2]
        ax.scatter(vXs, vYs, vZs, c='r', marker='x')
        arrows = np.array(arrows)
        arrows = arrows[selection]
        for i in range(len(arrows)):
            cax, cay, caz = arrows[i]
            cx = vXs[i]
            cy = vYs[i]
            cz = vZs[i]
            #a = Arrow3D([cx, cax[0]], [cy, cax[1]], [cz, cax[2]], color='r')
            #ax.add_artist(a)
            #a = Arrow3D([cx, cay[0]], [cy, cay[1]], [cz, cay[2]], color='b')
            #ax.add_artist(a)
            a = Arrow3D([cx, caz[0]], [cy, caz[1]], [cz, caz[2]], color='g')
            ax.add_artist(a)
        ax.set_xlim([-700, 500])
        ax.set_ylim([-1200, 0])
        ax.set_zlim([-200, 1000])
        ax.set_xlabel('X [mm]')
        ax.set_ylabel('Y [mm]')
        ax.set_zlabel('Z [mm]')
        plt.show()

    return new_selection
import numpy as np
import cv2
import pyrealsense2 as rs
import time, sys, glob

focal = 0.0021
baseline = 0.08

sd = rs.software_device()
depth_sensor = sd.add_sensor("Depth")

intr = rs.intrinsics()
intr.width = 848
intr.height = 480
intr.ppx = 637.951293945312
intr.ppy = 360.783233642578
intr.fx = 638.864135742188
intr.fy = 638.864135742188

vs = rs.video_stream()
vs.type = rs.stream.infrared
vs.fmt = rs.format.y8
vs.index = 1
vs.uid = 1
vs.width = intr.width
vs.height = intr.height
vs.fps = 30
vs.bpp = 1
vs.intrinsics = intr

depth_sensor.add_video_stream(vs)