def receive_image(self, img_msg: ImageMsg):
        """Process and republish new camera image.

        The image is cropped to simulate the internal preprocessing of our real camera.

        Args:
            img_msg: Received image message to process.
        """

        br = CvBridge()
        cv_img = br.imgmsg_to_cv2(img_msg)

        new_img = cv_img[self.param.output_start_y:self.param.output_end_y,
                         self.param.output_start_x:self.param.output_end_x]

        if self.param.apply_gan:
            new_img = self.gan_translator(
                new_img,
                f_keep_pixels=self.param.factor_keep_pixels,
                f_keep_colored_pixels=self.param.factor_keep_colored_pixels,
            )
        else:
            new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)

        out_msg = br.cv2_to_imgmsg(new_img, encoding="mono8")
        out_msg.header = img_msg.header

        self.publisher.publish(out_msg)
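
A note on wiring: this callback does nothing until it is connected to a
subscriber and a publisher. A minimal sketch of that setup, with hypothetical
topic names (none of them appear in the snippet itself):

    import rospy
    from sensor_msgs.msg import Image as ImageMsg

    # Sketch of the node's __init__; real topic names depend on the launch files.
    self.publisher = rospy.Publisher("camera/image", ImageMsg, queue_size=1)
    self.subscriber = rospy.Subscriber(
        "simulated_camera/image_raw", ImageMsg, self.receive_image, queue_size=1)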
Example 2
 def __enter__(self):
     bridge = CvBridge()
     cv_img = bridge.imgmsg_to_cv2(self.image_msg,
                                   desired_encoding=self.desired_encoding)
     self.tmpfile = tempfile.NamedTemporaryFile(suffix=".jpg")
     cv2.imwrite(self.tmpfile.name, cv_img)
     return self.tmpfile.__enter__()
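
This `__enter__` belongs to a context manager that exposes a ROS image message
as a temporary .jpg file. A sketch of the matching `__exit__` and a usage
example, assuming a hypothetical wrapper class
`TemporaryImageFile(image_msg, desired_encoding)` (neither name is shown in
the snippet):

 def __exit__(self, exc_type, exc_value, traceback):
     # Delegate cleanup so the temporary file is removed on exit.
     return self.tmpfile.__exit__(exc_type, exc_value, traceback)

 # Usage sketch: hand the image to a tool that expects a file path.
 # with TemporaryImageFile(msg, "bgr8") as f:
 #     detections = external_detector(f.name)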
Example 3
    def update_sensors(self):
        # print "light:", self._bridge.get_light_sensor()
        # print "floor:", self._bridge.get_floor_sensors()

        # Send image
        if self.enabled_sensors['camera']:
            # Get Image
            image = self._bridge.get_image()

            if image is not None:
                nimage = np.asarray(image)
                image_msg = CvBridge().cv2_to_imgmsg(nimage, "rgb8")
                self.image_publisher.publish(image_msg)

        # Send accelerometer sensor values
        if self.enabled_sensors['accelerometer']:
            accel = self._bridge.get_accelerometer()

            self.accelerometer_publisher.publish(Vector3(*accel))

        # Send the motor positions
        if self.enabled_sensors["motor_position"]:
            motor_position = self._bridge.get_motor_position()

            self.motor_position_publisher.publish(
                UInt32MultiArray(data=list(motor_position)))

        # Send the proximity sensor values
        if self.enabled_sensors["proximity"]:
            proximity = self._bridge.get_proximity()

            self.proximity_publisher.publish(
                UInt32MultiArray(data=list(proximity)))
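
The publishers used in `update_sensors` are assumed to be created elsewhere
on the class. A minimal sketch of those declarations, with hypothetical topic
names (the constructor is not part of the snippet):

    from geometry_msgs.msg import Vector3
    from sensor_msgs.msg import Image
    from std_msgs.msg import UInt32MultiArray

    # Sketch of the node's __init__:
    self.image_publisher = rospy.Publisher('camera', Image, queue_size=1)
    self.accelerometer_publisher = rospy.Publisher('accel', Vector3, queue_size=10)
    self.motor_position_publisher = rospy.Publisher(
        'motor_position', UInt32MultiArray, queue_size=10)
    self.proximity_publisher = rospy.Publisher(
        'proximity', UInt32MultiArray, queue_size=10)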
Example 4
    def __init__(self, path, rate=20, timeout=1):
        self.tfl = tf.TransformListener(True, rospy.Duration(timeout))
        self.transforms = []
        self.start_time = None
        self.path = path
        self.world = "base"
        self.rate_hz = rate
        self.rate = rospy.Rate(rate)
        self.microrate = rospy.Rate(1000)
        self.bridge = CvBridge()
        self.ready = False  # True when all components are ready
        self.recording = False  # True when all components are ready and the user asked to start

        # Enabled cameras
        self.cameras_enabled = {
            'kinect': rospy.get_param('/recorder/enabled/kinect', False),
            'depth': rospy.get_param('/recorder/enabled/depth', False),
            'left': rospy.get_param('/recorder/enabled/left', True),
            'right': rospy.get_param('/recorder/enabled/right', True),
            'head': rospy.get_param('/recorder/enabled/head', False)
        }

        # Non-camera sources to record as well
        self.components_enabled = {
            'frames': rospy.get_param('/recorder/enabled/frames', True)
        }

        # Recording is triggered when all components are ready
        self.readiness_lock = RLock()
        self.components_ready = {
            'kinect': False,
            'depth': False,
            'left': False,
            'right': False,
            'head': False,
            'clock': not rospy.get_param('use_sim_time', default=False)
        }

        self.frames = rospy.get_param(
            '/recorder/frames') if self.components_enabled['frames'] else []
        self.image = {
            'left': None,
            'right': None,
            'head': None,
            'kinect': None,
            'depth': None
        }
        self.locks = {
            'left': RLock(),
            'right': RLock(),
            'head': RLock(),
            'kinect': RLock(),
            'depth': RLock()
        }
        # self.four_cc = cv.CV_FOURCC('F' ,'M','P', '4')
        self.four_cc = cv2.VideoWriter_fourcc('F', 'M', 'P', '4')
        self.extension = '.avi'
        self.writers = {
            'left': None,
            'right': None,
            'head': None,
            'kinect': None,
            'depth': None
        }
        self.formats = {
            'left': 'bgr8',
            'right': 'bgr8',
            'head': 'bgr8',
            'kinect': 'bgr8',
            'depth': '8UC1'
        }  # TODO: should 'depth' be '32FC1' instead of '8UC1'?

        self.clock_sub = rospy.Subscriber('/clock',
                                          Clock,
                                          self.cb_clock,
                                          queue_size=1)
        self.right_image_sub = rospy.Subscriber(
            '/cameras/right_hand_camera/image',
            Image,
            self.cb_image_right,
            queue_size=1)
        self.left_image_sub = rospy.Subscriber(
            '/cameras/left_hand_camera/image',
            Image,
            self.cb_image_left,
            queue_size=1)
        self.head_image_sub = rospy.Subscriber('/usb_cam/image_raw',
                                               Image,
                                               self.cb_image_head,
                                               queue_size=1)
        self.kinect_rgb_sub = rospy.Subscriber('/camera/rgb/image_color',
                                               Image,
                                               self.cb_kinect_rgb,
                                               queue_size=1)
        self.kinect_depth_sub = rospy.Subscriber(
            '/camera/depth_registered/disparity',
            DisparityImage,
            self.cb_kinect_depth,
            queue_size=1)
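
The `components_ready` flags above imply that each subscriber callback marks
its component ready under `readiness_lock`. A sketch of that pattern for the
clock callback (the real `cb_clock` body is not part of the snippet):

        def cb_clock(self, msg):
            # The first /clock message means simulated time is flowing.
            with self.readiness_lock:
                self.components_ready['clock'] = True
                self.ready = all(self.components_ready.values())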
Example 5

    def update_sensors(self):
        # print "accelerometer:", self._bridge.get_accelerometer()
        # print "proximity:", self._bridge.get_proximity()
        # print "light:", self._bridge.get_light_sensor()
        # print "motor_position:", self._bridge.get_motor_position()
        # print "floor:", self._bridge.get_floor_sensors()
        # print "image:", self._bridge.get_image()

        # Send image if the camera is enabled
        if self.enabled_sensors['camera']:
            # Get Image
            image = self._bridge.get_image()
            #print image
            if image is not None:
                nimage = np.asarray(image)
                image_msg = CvBridge().cv2_to_imgmsg(nimage, "rgb8")
                self.image_publisher.publish(image_msg)

        if self.enabled_sensors['proximity']:
            prox_sensors = self._bridge.get_proximity()
            for i in range(0, 8):
                if prox_sensors[i] > 0:
                    # Convert the analog reading to a distance in meters
                    # (calibration obtained from field tests).
                    self.prox_msg[i].range = 0.5 / math.sqrt(prox_sensors[i])
                else:
                    self.prox_msg[i].range = self.prox_msg[i].max_range

                if self.prox_msg[i].range > self.prox_msg[i].max_range:
                    self.prox_msg[i].range = self.prox_msg[i].max_range
                if self.prox_msg[i].range < self.prox_msg[i].min_range:
                    self.prox_msg[i].range = self.prox_msg[i].min_range
                self.prox_msg[i].header.stamp = rospy.Time.now()
                self.prox_publisher[i].publish(self.prox_msg[i])

            # e-puck proximity positions (cm), x pointing forward, y pointing left
            #           P7(3.5, 1.0)   P0(3.5, -1.0)
            #       P6(2.5, 2.5)           P1(2.5, -2.5)
            #   P5(0.0, 3.0)                   P2(0.0, -3.0)
            #       P4(-3.5, 2.0)          P3(-3.5, -2.0)
            #
            # e-puck proximity orientations (degrees)
            #           P7(10)   P0(350)
            #       P6(40)           P1(320)
            #   P5(90)                   P2(270)
            #       P4(160)          P3(200)
            self.br.sendTransform(
                (0.035, -0.010, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 6.11),
                rospy.Time.now(), self._name + "/base_prox0",
                self._name + "/base_link")
            self.br.sendTransform(
                (0.025, -0.025, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 5.59),
                rospy.Time.now(), self._name + "/base_prox1",
                self._name + "/base_link")
            self.br.sendTransform(
                (0.000, -0.030, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 4.71),
                rospy.Time.now(), self._name + "/base_prox2",
                self._name + "/base_link")
            self.br.sendTransform(
                (-0.035, -0.020, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 3.49),
                rospy.Time.now(), self._name + "/base_prox3",
                self._name + "/base_link")
            self.br.sendTransform(
                (-0.035, 0.020, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 2.8),
                rospy.Time.now(), self._name + "/base_prox4",
                self._name + "/base_link")
            self.br.sendTransform(
                (0.000, 0.030, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 1.57),
                rospy.Time.now(), self._name + "/base_prox5",
                self._name + "/base_link")
            self.br.sendTransform(
                (0.025, 0.025, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 0.70),
                rospy.Time.now(), self._name + "/base_prox6",
                self._name + "/base_link")
            self.br.sendTransform(
                (0.035, 0.010, 0.034),
                tf.transformations.quaternion_from_euler(0, 0, 0.17),
                rospy.Time.now(), self._name + "/base_prox7",
                self._name + "/base_link")

        if self.enabled_sensors['accelerometer']:
            accel = self._bridge.get_accelerometer()
            accel_msg = Imu()
            accel_msg.header.stamp = rospy.Time.now()
            accel_msg.header.frame_id = self._name + "/base_link"
            # Raw readings are centered on 2048 with roughly 800 counts per g;
            # convert to m/s^2.
            accel_msg.linear_acceleration.x = (accel[1] - 2048.0) / 800.0 * 9.81
            accel_msg.linear_acceleration.y = (accel[0] - 2048.0) / 800.0 * 9.81
            accel_msg.linear_acceleration.z = (accel[2] - 2048.0) / 800.0 * 9.81
            accel_msg.linear_acceleration_covariance = [
                0.01, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01
            ]
            #print "accel raw: " + str(accel[0]) + ", " + str(accel[1]) + ", " + str(accel[2])
            #print "accel (m/s2): " + str((accel[0]-2048.0)/800.0*9.81) + ", " + str((accel[1]-2048.0)/800.0*9.81) + ", " + str((accel[2]-2048.0)/800.0*9.81)
            accel_msg.angular_velocity.x = 0
            accel_msg.angular_velocity.y = 0
            accel_msg.angular_velocity.z = 0
            accel_msg.angular_velocity_covariance = [
                0.01, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01
            ]
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            accel_msg.orientation = Quaternion(*q)
            accel_msg.orientation_covariance = [
                0.01, 0.0, 0.0, 0.0, 0.01, 0.0, 0.0, 0.0, 0.01
            ]
            self.accel_publisher.publish(accel_msg)

        if self.enabled_sensors['motor_position']:
            # Copy into a list: the function returns a tuple, whose elements
            # are immutable.
            motor_pos = list(self._bridge.get_motor_position())
            # Convert to 16 bits signed integer.
            if (motor_pos[0] & 0x8000):
                motor_pos[0] = -0x10000 + motor_pos[0]
            if (motor_pos[1] & 0x8000):
                motor_pos[1] = -0x10000 + motor_pos[1]
            #print "motor_pos: " + str(motor_pos[0]) + ", " + str(motor_pos[1])

            # Distance traveled by each wheel since the last update (meters).
            self.leftStepsDiff = motor_pos[0] * MOT_STEP_DIST - self.leftStepsPrev
            self.rightStepsDiff = motor_pos[1] * MOT_STEP_DIST - self.rightStepsPrev
            #print "left, right steps diff: " + str(self.leftStepsDiff) + ", " + str(self.rightStepsDiff)

            # Differential-drive kinematics: heading change (radians) and
            # distance traveled by the robot center (meters).
            self.deltaTheta = (self.rightStepsDiff - self.leftStepsDiff) / WHEEL_DISTANCE
            self.deltaSteps = (self.rightStepsDiff + self.leftStepsDiff) / 2
            #print "delta theta, steps: " + str(self.deltaTheta) + ", " + str(self.deltaSteps)

            # Midpoint integration: advance the pose using the heading halfway
            # through the step, which is more accurate than the old heading alone.
            self.x_pos += self.deltaSteps * math.cos(self.theta + self.deltaTheta / 2)
            self.y_pos += self.deltaSteps * math.sin(self.theta + self.deltaTheta / 2)
            self.theta += self.deltaTheta  # Radians.
            #print "x, y, theta: " + str(self.x_pos) + ", " + str(self.y_pos) + ", " + str(self.theta)

            self.leftStepsPrev = motor_pos[0] * MOT_STEP_DIST  # Meters.
            self.rightStepsPrev = motor_pos[1] * MOT_STEP_DIST  # Meters.

            odom_msg = Odometry()
            odom_msg.header.stamp = rospy.Time.now()
            odom_msg.header.frame_id = "odom"
            odom_msg.child_frame_id = self._name + "/base_link"
            odom_msg.pose.pose.position = Point(self.x_pos, self.y_pos, 0)
            q = tf.transformations.quaternion_from_euler(0, 0, self.theta)
            odom_msg.pose.pose.orientation = Quaternion(*q)
            self.endTime = time.time()
            # Velocities: distance (m) and angle (rad) covered since the last
            # update, divided by the elapsed time (s) to get m/s and rad/s.
            odom_msg.twist.twist.linear.x = self.deltaSteps / (
                self.endTime - self.startTime)
            odom_msg.twist.twist.angular.z = self.deltaTheta / (
                self.endTime - self.startTime)
            #print "time elapsed = " + str(self.endTime-self.startTime) + " seconds"
            self.startTime = self.endTime

            self.odom_publisher.publish(odom_msg)
            pos = (odom_msg.pose.pose.position.x,
                   odom_msg.pose.pose.position.y,
                   odom_msg.pose.pose.position.z)
            ori = (odom_msg.pose.pose.orientation.x,
                   odom_msg.pose.pose.orientation.y,
                   odom_msg.pose.pose.orientation.z,
                   odom_msg.pose.pose.orientation.w)
            self.br.sendTransform(pos, ori, odom_msg.header.stamp,
                                  odom_msg.child_frame_id,
                                  odom_msg.header.frame_id)

        if self.enabled_sensors['light']:
            light_sensors = self._bridge.get_light_sensor()
            light_sensors_marker_msg = Marker()
            light_sensors_marker_msg.header.frame_id = self._name + "/base_link"
            light_sensors_marker_msg.header.stamp = rospy.Time.now()
            light_sensors_marker_msg.type = Marker.TEXT_VIEW_FACING
            light_sensors_marker_msg.pose.position.x = 0.15
            light_sensors_marker_msg.pose.position.y = 0
            light_sensors_marker_msg.pose.position.z = 0.15
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            light_sensors_marker_msg.pose.orientation = Quaternion(*q)
            light_sensors_marker_msg.scale.z = 0.01
            light_sensors_marker_msg.color.a = 1.0
            light_sensors_marker_msg.color.r = 1.0
            light_sensors_marker_msg.color.g = 1.0
            light_sensors_marker_msg.color.b = 1.0
            light_sensors_marker_msg.text = "light: " + str(light_sensors)
            self.light_publisher.publish(light_sensors_marker_msg)

        if self.enabled_sensors['floor']:
            floor_sensors = self._bridge.get_floor_sensors()
            floor_marker_msg = Marker()
            floor_marker_msg.header.frame_id = self._name + "/base_link"
            floor_marker_msg.header.stamp = rospy.Time.now()
            floor_marker_msg.type = Marker.TEXT_VIEW_FACING
            floor_marker_msg.pose.position.x = 0.15
            floor_marker_msg.pose.position.y = 0
            floor_marker_msg.pose.position.z = 0.13
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            floor_marker_msg.pose.orientation = Quaternion(*q)
            floor_marker_msg.scale.z = 0.01
            floor_marker_msg.color.a = 1.0
            floor_marker_msg.color.r = 1.0
            floor_marker_msg.color.g = 1.0
            floor_marker_msg.color.b = 1.0
            floor_marker_msg.text = "floor: " + str(floor_sensors)
            self.floor_publisher.publish(floor_marker_msg)

        if self.enabled_sensors['selector']:
            curr_sel = self._bridge.get_selector()
            selector_marker_msg = Marker()
            selector_marker_msg.header.frame_id = self._name + "/base_link"
            selector_marker_msg.header.stamp = rospy.Time.now()
            selector_marker_msg.type = Marker.TEXT_VIEW_FACING
            selector_marker_msg.pose.position.x = 0.15
            selector_marker_msg.pose.position.y = 0
            selector_marker_msg.pose.position.z = 0.11
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            selector_marker_msg.pose.orientation = Quaternion(*q)
            selector_marker_msg.scale.z = 0.01
            selector_marker_msg.color.a = 1.0
            selector_marker_msg.color.r = 1.0
            selector_marker_msg.color.g = 1.0
            selector_marker_msg.color.b = 1.0
            selector_marker_msg.text = "selector: " + str(curr_sel)
            self.selector_publisher.publish(selector_marker_msg)

        if self.enabled_sensors['motor_speed']:
            # Copy into a list: the function returns a tuple, whose elements
            # are immutable.
            motor_speed = list(self._bridge.get_motor_speed())
            # Convert to 16 bits signed integer.
            if (motor_speed[0] & 0x8000):
                motor_speed[0] = -0x10000 + motor_speed[0]
            if (motor_speed[1] & 0x8000):
                motor_speed[1] = -0x10000 + motor_speed[1]
            motor_speed_marker_msg = Marker()
            motor_speed_marker_msg.header.frame_id = self._name + "/base_link"
            motor_speed_marker_msg.header.stamp = rospy.Time.now()
            motor_speed_marker_msg.type = Marker.TEXT_VIEW_FACING
            motor_speed_marker_msg.pose.position.x = 0.15
            motor_speed_marker_msg.pose.position.y = 0
            motor_speed_marker_msg.pose.position.z = 0.09
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            motor_speed_marker_msg.pose.orientation = Quaternion(*q)
            motor_speed_marker_msg.scale.z = 0.01
            motor_speed_marker_msg.color.a = 1.0
            motor_speed_marker_msg.color.r = 1.0
            motor_speed_marker_msg.color.g = 1.0
            motor_speed_marker_msg.color.b = 1.0
            motor_speed_marker_msg.text = "speed: " + str(motor_speed)
            self.motor_speed_publisher.publish(motor_speed_marker_msg)

        if self.enabled_sensors['microphone']:
            mic = self._bridge.get_microphone()
            microphone_marker_msg = Marker()
            microphone_marker_msg.header.frame_id = self._name + "/base_link"
            microphone_marker_msg.header.stamp = rospy.Time.now()
            microphone_marker_msg.type = Marker.TEXT_VIEW_FACING
            microphone_marker_msg.pose.position.x = 0.15
            microphone_marker_msg.pose.position.y = 0
            microphone_marker_msg.pose.position.z = 0.07
            q = tf.transformations.quaternion_from_euler(0, 0, 0)
            microphone_marker_msg.pose.orientation = Quaternion(*q)
            microphone_marker_msg.scale.z = 0.01
            microphone_marker_msg.color.a = 1.0
            microphone_marker_msg.color.r = 1.0
            microphone_marker_msg.color.g = 1.0
            microphone_marker_msg.color.b = 1.0
            microphone_marker_msg.text = "microphone: " + str(mic)
            self.microphone_publisher.publish(microphone_marker_msg)
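
The eight proximity transforms broadcast above (and the five text-marker
blocks) repeat one pattern with different constants. A table-driven sketch of
the transform broadcasting, behavior-equivalent to the hand-written calls:

            # (x, y, yaw) per proximity sensor, matching the layout table above.
            PROX_POSES = [
                (0.035, -0.010, 6.11), (0.025, -0.025, 5.59),
                (0.000, -0.030, 4.71), (-0.035, -0.020, 3.49),
                (-0.035, 0.020, 2.80), (0.000, 0.030, 1.57),
                (0.025, 0.025, 0.70), (0.035, 0.010, 0.17),
            ]
            for i, (x, y, yaw) in enumerate(PROX_POSES):
                self.br.sendTransform(
                    (x, y, 0.034),
                    tf.transformations.quaternion_from_euler(0, 0, yaw),
                    rospy.Time.now(),
                    "%s/base_prox%d" % (self._name, i),
                    self._name + "/base_link")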
Example 7
    def __init__(self, time_scale, gvf, num_features, features_to_use,
                 behavior_policy, target_policy):

        self.features_to_use = set(features_to_use + ['core', 'ir'])
        # Materialize the list so it can be iterated more than once (a bare
        # filter object would be exhausted after the first pass in Python 3).
        topics = [tools.features[f] for f in self.features_to_use
                  if tools.features[f]]

        # set up dictionary to receive sensor info
        self.recent = {topic: Queue(0) for topic in topics}

        # set up ros
        rospy.init_node('agent', anonymous=True)

        # setup sensor parsers
        for topic in topics:
            rospy.Subscriber(topic, tools.topic_format[topic],
                             self.recent[topic].put)
        self.topics = topics

        rospy.loginfo("Started sensor threads.")

        # smooth out the actions
        self.time_scale = time_scale
        self.r = rospy.Rate(int(1.0 / self.time_scale))

        # agent info
        self.gvf = gvf
        self.target_policy = target_policy
        self.behavior_policy = behavior_policy

        self.state_manager = StateManager(features_to_use)
        self.feature_indices = np.concatenate(
            [StateConstants.indices_in_phi[f] for f in features_to_use])
        self.img_to_cv2 = CvBridge().compressed_imgmsg_to_cv2

        # information for managing the shift between the target and behavior
        #  policies
        self.following_mu = 0
        self.following_pi = 1
        self.current_condition = self.following_mu

        self.current_policy = self.behavior_policy
        self.fixed_steps_under_pi = 100
        self.fixed_step_under_mu = 50
        self.mu_max_horizon = 50
        self.steps_under_mu = self.fixed_step_under_mu + np.random.randint(
            self.mu_max_horizon)

        # MSRE information
        self.sample_size = 1000
        self.samples_phi = np.zeros((self.sample_size, num_features))
        self.samples_G = np.zeros(self.sample_size)

        self.cumulant_buffer = np.zeros(self.fixed_steps_under_pi)
        self.gamma_buffer = np.zeros(self.fixed_steps_under_pi)

        # Set up publishers
        action_publisher = rospy.Publisher('action_cmd',
                                           geom_msg.Twist,
                                           queue_size=1)
        termination_publisher = rospy.Publisher('termination',
                                                Bool,
                                                queue_size=1)
        self.publishers = {
            'action': action_publisher,
            'termination': termination_publisher
        }
        rospy.loginfo("Done LearningForeground init.")
Example 8

    def receive_image(self, msg: ImageMsg):
        """Receive new camera image and publish corresponding labels."""
        try:
            tf_transform = self.listener.lookup_transform(
                "vehicle", "sim_world", msg.header.stamp, timeout=rospy.Duration(0.1)
            )
            current_tf = Transform(tf_transform.transform)
        except Exception as e:
            rospy.logerr(f"Could not lookup transform {e}")
            return

        # Pass latest car state to the speaker to ensure tf and
        # car state are approximately synchronized
        self.label_speaker.listen(self._latest_car_state_msg)

        try:
            # Pause physics to prevent the car from moving any further
            # while calculating the bounding boxes
            self.pause_physics_proxy()

            image_size = (
                self.camera_specs.output_size["width"],
                self.camera_specs.output_size["height"],
            )

            # Update transformations
            BoundingBox.set_tfs(current_tf, self.camera_specs.P)

            visible_bbs = self.label_speaker.speak(
                image_size, self.camera_specs.horizontal_fov
            )

            self.label_publisher.publish(
                ImageLabelsMsg(
                    img_header=msg.header,
                    bounding_boxes=[bb.to_msg() for bb in visible_bbs],
                )
            )

            # Optionally publish an image with bounding boxes drawn into it
            if self.param.publish_debug_image:
                colors = self.param.colors.as_dict()

                visualization_bbs = (
                    VisualBoundingBox(
                        bb.get_bounds(),
                        label=bb.class_description,
                        # ID namespaces come in steps of 100; fall back to the
                        # default color (key "-1") for unknown namespaces.
                        color=colors.get(str(bb.class_id // 100), colors[str(-1)]),
                    )
                    for bb in visible_bbs
                )

                br = CvBridge()
                cv_img = br.imgmsg_to_cv2(msg)
                cv_img: np.ndarray = cv_img.copy()
                cv_img = cv2.cvtColor(cv_img, cv2.COLOR_GRAY2RGB)

                for bb in visualization_bbs:
                    bb.draw(cv_img)
                out_msg = br.cv2_to_imgmsg(cv_img, encoding="rgb8")
                out_msg.header = msg.header

                self.image_publisher.publish(out_msg)
        finally:
            self.unpause_physics_proxy()
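
The `try`/`finally` around the physics pause guarantees the simulation
resumes even if labeling fails. The same guarantee could be written as a
small context manager, sketched here assuming the two proxies keep the names
used above:

    from contextlib import contextmanager

    @contextmanager
    def paused_physics(self):
        # Freeze the simulation for the duration of the block.
        self.pause_physics_proxy()
        try:
            yield
        finally:
            self.unpause_physics_proxy()

    # Usage sketch: with self.paused_physics(): compute and publish labels.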
Example 9
def image_parse(img, enc='passthrough'):
    """ Converts a ros image to a numpy array
    """
    br = CvBridge()
    image = np.asarray(br.imgmsg_to_cv2(img, desired_encoding=enc))
    return image
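
A usage sketch for `image_parse`, assuming messages arrive on a hypothetical
topic (the callback and topic name are illustrative only; `Image` is the
sensor_msgs message type imported below):

    def on_image(msg):
        arr = image_parse(msg, enc='bgr8')  # HxWx3 uint8 array
        rospy.loginfo("image shape: %s", arr.shape)

    rospy.Subscriber("camera/image_raw", Image, on_image, queue_size=1)
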
if __name__ == "__main__":
    try:

        camera = arducam.mipi_camera()
        print("Open camera...")
        camera.init_camera()
        print("Setting the resolution...")

        fmt = camera.set_resolution(640, 400)
        print("Current resolution is {}".format(fmt))

        rospy.init_node("arducam_ros_node")
        pub = rospy.Publisher("arducam/camera/image_raw", Image, queue_size=1)

        bridge = CvBridge()
        exposure_val = rospy.get_param('~exposure')
        cam_gain_val = rospy.get_param('~cam_gain')
        fps_val = rospy.get_param('~fps')

        # Start a window thread for displaying images; this makes the
        # on-screen preview noticeably smoother.
        cv2.startWindowThread()

        print("Capture loop!")
        camera.set_control(v4l2.V4L2_CID_EXPOSURE, exposure_val)
        camera.set_control(v4l2.V4L2_CID_GAIN, cam_gain_val)
        #r = rospy.Rate(3) # set rate of 30 Hz
        while not rospy.is_shutdown():

            frame = camera.capture(encoding='raw')
            height = int(align_up(fmt[1], 16))