def run(self):
        while True:
            try:
                # `tfds.as_numpy` converts `tf.Tensor` -> `np.array`
                for ex in tfds.as_numpy(self.ds):
                    for image, label in zip(ex['image'], ex['label']):
                        start = time.time()
                        header = Header()
                        set_timestamp(header, time.time())
                        # Convert RGB to BGR; single-channel (grayscale) images pass through
                        if image.shape[2] != 1:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        image_msg = from_ndarray(image, header)

                        label_msg = Label()
                        label_msg.header = header
                        label_msg.confidence = 1.0
                        # `int2str` returns the human readable label ('dog', 'car',...)
                        label_msg.class_name = self.info.features['label'].int2str(label)

                        self.publish("image", image_msg)
                        self.publish("label", label_msg)

                        # Sleep off the rest of the frame period, if any time remains
                        period = 1.0 / self.get_property("frame_rate")
                        elapsed = time.time() - start
                        if period > elapsed:
                            time.sleep(period - elapsed)
            except Exception:
                # restart the iteration on transient dataset errors instead of dying
                pass
Example No. 2
    def _publish_imu(self, carla_imu_measurement):
        """
        Function to transform a received imu measurement into a ROS Imu message
        :param carla_imu_measurement: carla imu measurement object
        :type carla_imu_measurement: carla.IMUMeasurement
        """
        imu_msg = Imu()
        imu_msg.header.frame_id = self._imu_frame
        set_timestamp(imu_msg.header, carla_imu_measurement.timestamp)
        # Carla uses a left-handed coordinate convention (X forward, Y right, Z up).
        # Here, these measurements are converted to the right-handed ROS convention
        #  (X forward, Y left, Z up).
        imu_msg.angular_velocity.x = -carla_imu_measurement.gyroscope.x
        imu_msg.angular_velocity.y = carla_imu_measurement.gyroscope.y
        imu_msg.angular_velocity.z = -carla_imu_measurement.gyroscope.z

        imu_msg.linear_acceleration.x = carla_imu_measurement.accelerometer.x
        imu_msg.linear_acceleration.y = -carla_imu_measurement.accelerometer.y
        imu_msg.linear_acceleration.z = carla_imu_measurement.accelerometer.z

        imu_rotation = carla_imu_measurement.transform.rotation

        quat = trans.carla_rotation_to_numpy_quaternion(imu_rotation)
        imu_msg.orientation = trans.numpy_quaternion_to_ros_quaternion(quat)
        self.publish("imu", imu_msg)
Example No. 3
    def _publish_pointcloud(self, carla_lidar_measurement):
        """
        Function to transform a received lidar measurement into a ROS point cloud message
        :param carla_lidar_measurement: carla lidar measurement object
        :type carla_lidar_measurement: carla.LidarMeasurement
        """
        header = Header()
        set_timestamp(header, carla_lidar_measurement.timestamp)
        header.frame_id = self._lidar_frame
        fields = [
            PointField("x", 0, PointField.FLOAT32, 1),
            PointField("y", 4, PointField.FLOAT32, 1),
            PointField("z", 8, PointField.FLOAT32, 1),
            PointField("intensity", 12, PointField.FLOAT32, 1),
        ]

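        # Each raw lidar point is packed as four float32 values matching the
        # fields above: x, y, z, intensity.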
        lidar_data = (
            np.frombuffer(carla_lidar_measurement.raw_data, dtype=np.float32)
            .reshape([-1, 4])
            .copy()
        )
        # Negate the y axis: lidar points are expressed in CARLA's left-handed
        # coordinate system, while ROS expects right-handed coordinates.
        lidar_data[:, 1] *= -1
        point_cloud_msg = create_cloud(header, fields, lidar_data)
        self.publish("lidar", point_cloud_msg)
Example No. 4
 def __bird_eye_image(self, image):
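     # Wrap CARLA's raw BGRA pixel buffer as a NumPy array (no copy is made).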
     carla_image_data_array = np.ndarray(
         shape=(image.height, image.width, 4), dtype=np.uint8, buffer=image.raw_data
     )
     header = Header()
     set_timestamp(header, image.timestamp)
     img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
     self.publish("bird_eye", img_msg)
Example No. 5
    def training(self):

        self.alert("Training Mode", "INFO")
        for epoch in range(self.epochs):  # loop over the dataset multiple times
            self.alert("Epochs: %i / %i" % (epoch + 1, self.epochs), "INFO")
            # Reset the metrics at the start of the next epoch
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()

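            # Busy-wait on the batch queue until every mini-batch has been consumed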
            while self.mini_batch_counter < self.mini_batches:
                if not self.batches.empty():
                    start = time.time()
                    self.batch_transform()
                    # get the inputs: the image batch and its labels
                    images = self.image_batch / 255.0
                    labels = self.label_batch
                    self.train_step(images, labels)
                    self.mini_batch_counter += 1
                    self.image_batch = None
                    self.label_batch = []
            self.mini_batch_counter = 0
            # Publish the loss for every epoch
            Loss = Float64()
            header = Header()
            set_timestamp(header, time.time())
            Loss.header = header
            Loss.data = float(self.train_loss.result())

            # Publish the accuracy for every epoch
            Accuracy = Float64()
            Accuracy.header = header
            Accuracy.data = float(self.train_accuracy.result() * 100)

            self.publish('loss', Loss)
            self.publish('accuracy', Accuracy)

            with self.writer.as_default():
                tf.summary.scalar("epoch_loss",
                                  self.train_loss.result(),
                                  step=(epoch + 1))
                tf.summary.scalar("epoch_accuarcy",
                                  self.train_accuracy.result() * 100,
                                  step=(epoch + 1))

            # Save an intermediate checkpoint after every epoch except the last
            if (epoch + 1) != self.epochs:
                path = os.path.join(self.model_path, self.model_name)
                self.model.save(path)

        self.alert("Training is completed.", "INFO")

        self.alert("Saving the model is started", "INFO")
        path = os.path.join(self.model_path, self.model_name)
        self.model.save(path)
        self.alert("Saving the model is completed", "INFO")
Example No. 6
 def _publish_lane_inv(self, lane_invasion_event):
     """
     Function to wrap the lane invasion event into a ROS message
     :param lane_invasion_event: carla lane invasion event object
     :type lane_invasion_event: carla.LaneInvasionEvent
     """
     lane_invasion_msg = CarlaLaneInvasionEvent()
     lane_invasion_msg.header.frame_id = "ego_vehicle"
     set_timestamp(lane_invasion_msg.header, lane_invasion_event.timestamp)
     for marking in lane_invasion_event.crossed_lane_markings:
         lane_invasion_msg.crossed_lane_markings.append(marking.type)
     self.publish("lanes_invasion", lane_invasion_msg)
Example No. 7
 def _publish_gnss(self, carla_gnss_measurement):
     """
     Function to transform a received GNSS measurement into a ROS NavSatFix message
     :param carla_gnss_measurement: carla gnss measurement object
     :type carla_gnss_measurement: carla.GnssMeasurement
     """
     navsatfix_msg = NavSatFix()
     set_timestamp(navsatfix_msg.header, carla_gnss_measurement.timestamp)
     navsatfix_msg.header.frame_id = self._gnss_frame
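     # CARLA GNSS values are already geodetic: latitude and longitude in
     # degrees, altitude in metres.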
     navsatfix_msg.latitude = carla_gnss_measurement.latitude
     navsatfix_msg.longitude = carla_gnss_measurement.longitude
     navsatfix_msg.altitude = carla_gnss_measurement.altitude
     self.publish("fix", navsatfix_msg)
Example No. 8
 def _publish_bgr_image(self, image, port_key="rgb_camera"):
     """callback function to rgb camera sensor
     it converts the image to ROS image in BGR8 encoding
     """
     carla_image_data_array = np.ndarray(
         shape=(image.height, image.width, 4), dtype=np.uint8, buffer=image.raw_data
     )
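     # CARLA delivers BGRA pixels, so slicing off the alpha channel below
     # yields an image already in BGR order.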
     header = Header()
     set_timestamp(header, image.timestamp)
     img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
     self.publish(port_key + "/image_raw", img_msg)
     self._camera_info.header = header
     self.publish(port_key + "/camera_info", self._camera_info)
Example No. 9
    def testing(self):

        self.alert("Testing Mode", "INFO")
        self.alert("Loading the model...", "INFO")
        self.model = tf.keras.models.load_model(self.model_path)
        self.alert("The model has been loaded.", "INFO")
        self.test_loss.reset_states()
        self.test_accuracy.reset_states()

        while self.mini_batch_counter < self.mini_batches:
            if not self.batches.empty():
                self.batch_transform()
                # get the inputs: the image batch and its labels
                images = self.image_batch / 255.0
                labels = self.label_batch
                self.test_step(images, labels)
                self.mini_batch_counter += 1
                self.image_batch = None
                self.label_batch = []

        self.mini_batch_counter = 0
        # Publish the loss for the test run
        Loss = Float64()
        header = Header()
        set_timestamp(header, time.time())
        Loss.header = header
        Loss.data = float(self.test_loss.result())

        # Publish the accuracy for the test run
        Accuracy = Float64()
        Accuracy.header = header
        Accuracy.data = float(self.test_accuracy.result() * 100)

        self.publish('loss', Loss)
        self.publish('accuracy', Accuracy)
        with self.writer.as_default():
            # testing is a single pass (there is no epoch counter here), so log at step 0
            tf.summary.scalar("epoch_loss",
                              self.test_loss.result(),
                              step=0)
            tf.summary.scalar("epoch_accuracy",
                              self.test_accuracy.result() * 100,
                              step=0)

        self.alert("Testing is completed.", "INFO")

        self.alert("The Testing Loss: %.3f" % (self.test_loss.result()),
                   "INFO")

        self.alert(
            "The Testing Accuracy: %.2f %%" %
            (self.test_accuracy.result() * 100), "INFO")
Example No. 10
 def _publish_collision(self, collision_event):
     """
     Function to wrap the collision event into a ROS message
     :param collision_event: carla collision event object
     :type collision_event: carla.CollisionEvent
     """
     collision_msg = CarlaCollisionEvent()
     collision_msg.header.frame_id = "ego_vehicle"
     set_timestamp(collision_msg.header, collision_event.timestamp)
     collision_msg.other_actor_id = collision_event.other_actor.id
     collision_msg.normal_impulse.x = collision_event.normal_impulse.x
     collision_msg.normal_impulse.y = collision_event.normal_impulse.y
     collision_msg.normal_impulse.z = collision_event.normal_impulse.z
     self.publish("collision", collision_msg)
Example No. 11
 def _publish_semantic_seg(self, carla_image):
     """callback function to semantic segmentation camera sensor
     it converts the image to ROS image in BGR8 encoding
     """
     carla_image.convert(carla.ColorConverter.CityScapesPalette)
     carla_image_data_array = np.ndarray(
         shape=(carla_image.height, carla_image.width, 4),
         dtype=np.uint8,
         buffer=carla_image.raw_data,
     )
     header = Header()
     set_timestamp(header, carla_image.timestamp)
     img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
     self.publish("image_seg", img_msg)
Example No. 12
 def _publish_radar(self, carla_radar_measurement):
     """
     Function to transform a received radar measurement into a ROS message
     :param carla_radar_measurement: carla Radar measurement object
     :type carla_radar_measurement: carla.RadarMeasurement
     """
     radar_msg = CarlaRadarMeasurement()
     set_timestamp(radar_msg.header, carla_radar_measurement.timestamp)
     radar_msg.header.frame_id = self._radar_frame
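     # Each detection is expressed in spherical coordinates: altitude and
     # azimuth angles, depth (range), and radial velocity.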
     for detection in carla_radar_measurement:
         radar_detection = CarlaRadarDetection()
         radar_detection.altitude = detection.altitude
         radar_detection.azimuth = detection.azimuth
         radar_detection.depth = detection.depth
         radar_detection.velocity = detection.velocity
         radar_msg.detections.append(radar_detection)
     self.publish("radar", radar_msg)
Example No. 13
 def run_scenario(self):
     """execute the TraCI control loop"""
     step = 0
     vId = ""
     # we start with phase 2 where EW has green
     traci.trafficlight.setPhase("0", 2)
     while traci.simulation.getMinExpectedNumber() > 0:
         traci.simulationStep()
         vIds = traci.vehicle.getIDList()
         if not vId and vIds:
             # latch on to the first vehicle that enters the simulation
             vId = vIds[0]
         if vId in vIds:
             header = Header()
             set_timestamp(header, time.time())
             position = traci.vehicle.getPosition(vId)
             speed = traci.vehicle.getSpeed(vId)
             acceleration = traci.vehicle.getAcceleration(vId)
             lateral_speed = traci.vehicle.getLateralSpeed(vId)
             waiting_time = traci.vehicle.getWaitingTime(vId)
             pos = Point()
             pos.x, pos.y, pos.z = position[0], position[1], 0.0
             vel, acc, lvel, wt = Float64(), Float64(), Float64(), Float64()
             vel.header, vel.data = header, speed
             acc.header, acc.data = header, acceleration
             lvel.header, lvel.data = header, lateral_speed
             wt.header, wt.data = header, waiting_time
             self.publish("pos", pos)
             self.publish("vel", vel)
             self.publish("acc", acc)
             self.publish("lateral_speed", lvel)
             self.publish("waiting_time", wt)
         if traci.trafficlight.getPhase("0") == 2:
             # we are not already switching
             if traci.inductionloop.getLastStepVehicleNumber("0") > 0:
                 # there is a vehicle from the north, switch
                 traci.trafficlight.setPhase("0", 3)
             else:
                 # otherwise try to keep green for EW
                 traci.trafficlight.setPhase("0", 2)
         step += 1
     traci.close()
     sys.stdout.flush()
Example No. 14
    def run(self):
        while True:
            if self.play:
                self.play = False
                # `tfds.as_numpy` converts `tf.Tensor` -> `np.array`
                for ex in tfds.as_numpy(self.ds):
                    start = time.time()
                    header = Header()
                    set_timestamp(header, time.time())
                    image_batch_msg = ImageBatch()
                    image_batch_msg.header = header
                    # Convert RGB batches to BGR; single-channel batches pass through
                    if ex['image'].shape[3] != 1:
                        image_batch_msg.Batch = [
                            from_ndarray(cv2.cvtColor(img, cv2.COLOR_RGB2BGR),
                                         header) for img in ex["image"]
                        ]
                    else:
                        image_batch_msg.Batch = [
                            from_ndarray(img, header) for img in ex["image"]
                        ]

                    labels_msg = LabelArray()
                    labels_msg.header = header
                    for label in ex['label']:
                        label_msg = Label()
                        label_msg.header = header
                        label_msg.confidence = 1.0
                        # `int2str` returns the human readable label ('dog', 'car',...)
                        label_msg.class_name = self.info.features[
                            'label'].int2str(label)
                        labels_msg.labels.append(label_msg)

                    self.publish("image_batches", image_batch_msg)
                    self.publish("labels", labels_msg)
                    end = time.time() - start
                    if (1.0 / self.get_property("frame_rate")) > end:
                        time.sleep((1.0 / self.get_property("frame_rate")) -
                                   end)

            time.sleep(0.001)
Example No. 15
    def run(self):
        # Block Main Loop
        last_time = time.time()  # start time of the loop
        while True:
            # timer to limit the frame rate
            if time.time() - last_time < self.period:
                try:
                    time.sleep(self.period - (time.time() - last_time))
                except ValueError:
                    # the remaining time can go negative between the check and the sleep
                    pass
            # check for the time limit for the executed command
            if self.move_command:
                if self.car:
                    self.car_controls = airsim.CarControls()
                    self.car_controls.throttle = self.command_list[0]
                    self.car_controls.steering = self.command_list[1]
                    self.client.setCarControls(self.car_controls)
                    self.alert("Moving Forward ", "INFO")
                    self.car_moved = True
                    self.stop_at = time.time() + self.command_list[2]
                else:
                    self.alert("Moving To {}".format(self.command_list), "INFO")
                    self.client.moveToPositionAsync(
                        self.command_list[0],
                        self.command_list[1],
                        self.command_list[2],
                        self.command_list[3],
                    )
                self.move_command = False
            if time.time() >= self.stop_at and self.car_moved:
                self.car_controls.throttle = 0
                self.client.setCarControls(self.car_controls)
                self.car_moved = False
                self.alert("Stopping the Car ", "INFO")
            # AirSim Default Cameras names
            cameras = [
                "front_center",
                "front_right",
                "front_left",
                "fpv",
                "back_center",
            ]
            # Reading front_center camera
            camera_id = 0
            responses = self.client.simGetImages(
                [airsim.ImageRequest(0, airsim.ImageType.Scene, False, False)]
            )
            # print('Retrieved images: %d', len(responses))
            for response in responses:
                # get numpy array
                img1d = np.fromstring(response.image_data_uint8, dtype=np.uint8)
                # reshape array to 4 channel image array H X W X 4
                img_rgb = img1d.reshape(response.height, response.width, 3)
                # Create Header for the ROS message
                header = Header()
                set_timestamp(header, time.time())
                header.frame_id = cameras[camera_id]
                # Convert Numpy array to ROS message using yonoarc_utils.image
                img_msg = from_ndarray(img_rgb, header)
                # Publish this message on Block's output port
                self.publish("cam_{}".format(camera_id), img_msg)
                camera_id += 1
            # Read Semantic Segmentation Image from Camera 0
            # Create ROS message and publish it like the previous steps
            seg_responses = self.client.simGetImages(
                [airsim.ImageRequest(0, airsim.ImageType.Segmentation, False, False)]
            )
            img1d = np.frombuffer(seg_responses[0].image_data_uint8, dtype=np.uint8)
            img_rgb = img1d.reshape(seg_responses[0].height, seg_responses[0].width, 3)
            header = Header()
            set_timestamp(header, time.time())
            header.frame_id = "front_center"
            img_msg = from_ndarray(img_rgb, header)
            self.publish("sem_segm", img_msg)
            # Read Lidar Data and convert it to PointCloud2 ROS message
            lidarData = self.client.getLidarData()
            if len(lidarData.point_cloud) < 3:
                self.alert("\tNo points received from Lidar data", "WARN")
            else:
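                # parse_lidarData is assumed to turn the flat float list into a
                # point cloud message the "lidar" port can publish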
                points = self.parse_lidarData(lidarData)
                self.publish("lidar", points)

            try:
                states = self.client.getCarState()
                float_msg = Float32()
                float_msg.data = states.speed
                self.publish("speed", float_msg)
            except Exception as e:
                # getCarState is unavailable for non-car vehicles; log and move on
                print(e)
            # Update Timer
            last_time = time.time()
Example No. 16
    def run(self):
        ltime = time.time()
        idx = 0
        while True:
            if time.time() - ltime >= 1.0 / self.frequency:
                # wrap around so the dataset loops forever
                if idx == len(self.left_img_list):
                    idx = 0
                # RGB Images
                left_img = self.read_image_file(self.left_img_list[idx])
                right_img = self.read_image_file(self.right_img_list[idx])
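                # One header is shared by all the streams below; only its
                # frame_id is updated before each publish.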
                header = Header()
                set_timestamp(header, time.time())
                header.frame_id = "left_img"
                left_msg = from_ndarray(left_img, header)
                self.publish("left_img", left_msg)
                header.frame_id = "right_img"
                right_msg = from_ndarray(right_img, header)
                self.publish("right_img", right_msg)
                # Depth Images
                left_depth = self.read_numpy_file(self.left_depth_list[idx])
                left_depth_vis = depth2vis(left_depth)
                header.frame_id = "left_depth"
                left_msg = from_ndarray(left_depth_vis, header)
                self.publish("left_depth", left_msg)
                right_depth = self.read_numpy_file(self.right_depth_list[idx])
                right_depth_vis = depth2vis(right_depth)
                header.frame_id = "right_depth"
                right_msg = from_ndarray(right_depth_vis, header)
                self.publish("right_depth", right_msg)
                # Semantic Segmentation
                left_seg = self.read_numpy_file(self.left_seg_list[idx])
                left_seg_vis = seg2vis(left_seg)
                header.frame_id = "left_segmentation"
                left_msg = from_ndarray(left_seg_vis, header)
                self.publish("left_segmentation", left_msg)
                right_seg = self.read_numpy_file(self.right_seg_list[idx])
                right_seg_vis = seg2vis(right_seg)
                header.frame_id = "right_segmentation"
                right_msg = from_ndarray(right_seg_vis, header)
                self.publish("right_segmentation", right_msg)
                # Left Camera Pose
                pose_stamped = PoseStamped()
                pose_stamped.header = header
                pose_stamped.header.frame_id = "left_camera"
                pose = Pose()
                pose.position.x = self.pose_l[idx][0]
                pose.position.y = self.pose_l[idx][1]
                pose.position.z = self.pose_l[idx][2]
                pose.orientation.x = self.pose_l[idx][3]
                pose.orientation.y = self.pose_l[idx][4]
                pose.orientation.z = self.pose_l[idx][5]
                pose.orientation.w = self.pose_l[idx][6]
                pose_stamped.pose = pose
                self.publish("left_pose", pose_stamped)
                # Right Camera Pose
                pose_stamped = PoseStamped()
                pose_stamped.header = header
                pose_stamped.header.frame_id = "right_camera"
                pose = Pose()
                pose.position.x = self.pose_r[idx][0]
                pose.position.y = self.pose_r[idx][1]
                pose.position.z = self.pose_r[idx][2]
                pose.orientation.x = self.pose_r[idx][3]
                pose.orientation.y = self.pose_r[idx][4]
                pose.orientation.z = self.pose_r[idx][5]
                pose.orientation.w = self.pose_r[idx][6]
                pose_stamped.pose = pose
                self.publish("right_pose", pose_stamped)

                # Optical Flow (defined between consecutive frames, so skip idx 0)
                if idx > 0:
                    flow = self.read_numpy_file(self.flow_list[idx - 1])
                    flow_vis = flow2vis(flow)
                    header.frame_id = "optical_flow"
                    left_msg = from_ndarray(flow_vis, header)
                    self.publish("optical_flow", left_msg)
                    flow_mask = self.read_numpy_file(self.flow_mask_list[idx - 1])
                    flow_vis_w_mask = flow2vis(flow, mask=flow_mask)
                    header.frame_id = "optical_flow_mask"
                    right_msg = from_ndarray(flow_vis_w_mask, header)
                    self.publish("optical_flow_mask", right_msg)

                ltime = time.time()
                idx += 1