    def run(self):
        while True:
            try:
                # `tfds.as_numpy` converts `tf.Tensor` -> `np.array`
                for ex in tfds.as_numpy(self.ds):
                    for image, label in zip(ex['image'], ex['label']):
                        start = time.time()
                        header = Header()
                        set_timestamp(header, time.time())
                        # Grayscale images pass through; color images are
                        # converted from RGB (TFDS) to BGR (OpenCV).
                        if image.shape[2] != 1:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        image_msg = from_ndarray(image, header)

                        label_msg = Label()
                        label_msg.header = header
                        label_msg.confidence = 1.0
                        # `int2str` returns the human-readable label ('dog', 'car', ...)
                        label_msg.class_name = self.info.features['label'].int2str(label)

                        self.publish("image", image_msg)
                        self.publish("label", label_msg)

                        # Throttle publishing to the configured frame rate.
                        period = 1.0 / self.get_property("frame_rate")
                        elapsed = time.time() - start
                        if period > elapsed:
                            time.sleep(period - elapsed)
            except Exception as e:
                # A bare `except: pass` would hide real errors; report them instead.
                self.alert(str(e), "ERROR")
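
A minimal sketch of how `self.ds` and `self.info` might be initialized before `run` is called, assuming a YonoArc-style `on_start` hook (the dataset name, split, and batch size are illustrative assumptions, not taken from this example):

    import tensorflow_datasets as tfds

    def on_start(self):
        # `with_info=True` also returns the `DatasetInfo` whose label
        # feature provides the `int2str` lookup used in `run` above.
        self.ds, self.info = tfds.load('mnist', split='train',
                                       batch_size=32, with_info=True)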
Example No. 2
    def __bird_eye_image(self, image):
        """Publish a CARLA bird's-eye camera frame as a ROS image."""
        # CARLA raw image data is BGRA; drop the alpha channel before publishing.
        carla_image_data_array = np.ndarray(
            shape=(image.height, image.width, 4), dtype=np.uint8, buffer=image.raw_data
        )
        header = Header()
        set_timestamp(header, image.timestamp)
        img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
        self.publish("bird_eye", img_msg)
    def on_new_messages(self, messages):
        width = self.get_property("width")
        height = self.get_property("height")
        dim = (width, height)
        # INTER_AREA is a sensible default interpolation for downscaling.
        resized_image = cv2.resize(to_ndarray(messages["image"]), dim,
                                   interpolation=cv2.INTER_AREA)
        self.publish("resized_image",
                     from_ndarray(resized_image, messages["image"].header))
    def run(self):
        while True:
            if self.play:
                self.play = False
                # `tfds.as_numpy` converts `tf.Tensor` -> `np.array`
                for ex in tfds.as_numpy(self.ds):
                    start = time.time()
                    header = Header()
                    set_timestamp(header, time.time())
                    image_batch_msg = ImageBatch()
                    image_batch_msg.header = header
                    # TFDS yields RGB; convert color batches to BGR for OpenCV.
                    if ex['image'].shape[3] != 1:
                        image_batch_msg.Batch = [
                            from_ndarray(cv2.cvtColor(img, cv2.COLOR_RGB2BGR),
                                         header) for img in ex["image"]
                        ]
                    else:
                        image_batch_msg.Batch = [
                            from_ndarray(img, header) for img in ex["image"]
                        ]

                    labels_msg = LabelArray()
                    labels_msg.header = header
                    for label in ex['label']:
                        label_msg = Label()
                        label_msg.header = header
                        label_msg.confidence = 1.0
                        # `int2str` returns the human readable label ('dog', 'car',...)
                        label_msg.class_name = self.info.features[
                            'label'].int2str(label)
                        labels_msg.labels.append(label_msg)

                    self.publish("image_batches", image_batch_msg)
                    self.publish("labels", labels_msg)
                    elapsed = time.time() - start
                    if (1.0 / self.get_property("frame_rate")) > elapsed:
                        time.sleep((1.0 / self.get_property("frame_rate")) -
                                   elapsed)

            time.sleep(0.001)
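
The `self.play` flag is presumably flipped from outside `run`; a hedged sketch assuming a YonoArc-style button callback (the hook name and button key are assumptions, not confirmed by this example):

    def on_button_clicked(self, button_key):
        # Hypothetical "play" button on the block: arm a single pass
        # over the dataset in the `run` loop above.
        if button_key == "play":
            self.play = True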
Example No. 5
    def _publish_bgr_image(self, image, port_key="rgb_camera"):
        """Callback for the RGB camera sensor.

        Converts the CARLA image to a ROS image in BGR8 encoding.
        """
        # CARLA raw image data is BGRA; drop the alpha channel.
        carla_image_data_array = np.ndarray(
            shape=(image.height, image.width, 4), dtype=np.uint8, buffer=image.raw_data
        )
        header = Header()
        set_timestamp(header, image.timestamp)
        img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
        self.publish(port_key + "/image_raw", img_msg)
        self._camera_info.header = header
        self.publish(port_key + "/camera_info", self._camera_info)
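
`self._camera_info` is presumably built once from the camera's attributes; a hedged sketch assuming ROS1's `sensor_msgs/CameraInfo` and a pinhole model derived from the camera's field of view (the helper name is hypothetical):

    import math
    from sensor_msgs.msg import CameraInfo

    def _build_camera_info(self, width, height, fov_deg):
        # Pinhole focal length from the horizontal field of view:
        # fx = w / (2 * tan(FOV / 2)); square pixels assumed (fy = fx).
        fx = width / (2.0 * math.tan(math.radians(fov_deg) / 2.0))
        cx, cy = width / 2.0, height / 2.0
        info = CameraInfo()
        info.width, info.height = width, height
        info.distortion_model = "plumb_bob"
        info.D = [0.0] * 5
        info.K = [fx, 0.0, cx, 0.0, fx, cy, 0.0, 0.0, 1.0]
        info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        info.P = [fx, 0.0, cx, 0.0, 0.0, fx, cy, 0.0, 0.0, 0.0, 1.0, 0.0]
        return info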
Example No. 6
    def on_new_messages(self, messages):
        original_image = to_ndarray(messages['original_image'])
        if self.resize_type == "resize":
            # Absolute target size; a property of 0 keeps the original dimension.
            height = (self.get_property("height") if self.get_property("height") != 0
                      else original_image.shape[0])
            width = (self.get_property("width") if self.get_property("width") != 0
                     else original_image.shape[1])
        else:
            # Relative scale factors; a property of 0 keeps the original dimension.
            height = (int(original_image.shape[0] * self.get_property("height"))
                      if self.get_property("height") != 0 else original_image.shape[0])
            width = (int(original_image.shape[1] * self.get_property("width"))
                     if self.get_property("width") != 0 else original_image.shape[1])
        dim = (width, height)

        resized_image = cv2.resize(
            original_image, dim,
            interpolation=self.interpolation[self.get_property("interpolation")])

        self.publish("resized_image",
                     from_ndarray(resized_image, messages['original_image'].header))
Example No. 7
    def _publish_semantic_seg(self, carla_image):
        """Callback for the semantic segmentation camera sensor.

        Converts the image to a ROS image in BGR8 encoding.
        """
        # Map raw class IDs to the CityScapes color palette for visualization.
        carla_image.convert(carla.ColorConverter.CityScapesPalette)
        carla_image_data_array = np.ndarray(
            shape=(carla_image.height, carla_image.width, 4),
            dtype=np.uint8,
            buffer=carla_image.raw_data,
        )
        header = Header()
        set_timestamp(header, carla_image.timestamp)
        img_msg = from_ndarray(carla_image_data_array[:, :, :3], header)
        self.publish("image_seg", img_msg)
Example No. 8
    def run(self):
        # Block Main Loop
        last_time = time.time()  # start time of the loop
        while True:
            # timer to limit the frame rate
            if time.time() - last_time < self.period:
                try:
                    time.sleep(self.period - (time.time() - last_time))
                except ValueError:
                    # The remaining time can turn negative between the check
                    # and the call; skip sleeping in that case.
                    pass
            # check for the time limit for the executed command
            if self.move_command:
                if self.car:
                    self.car_controls = airsim.CarControls()
                    self.car_controls.throttle = self.command_list[0]
                    self.car_controls.steering = self.command_list[1]
                    self.client.setCarControls(self.car_controls)
                    self.alert("Moving Forward ", "INFO")
                    self.car_moved = True
                    self.stop_at = time.time() + self.command_list[2]
                else:
                    self.alert("Moving To {}".format(self.command_list), "INFO")
                    self.client.moveToPositionAsync(
                        self.command_list[0],
                        self.command_list[1],
                        self.command_list[2],
                        self.command_list[3],
                    )
                self.move_command = False
            if time.time() >= self.stop_at and self.car_moved:
                self.car_controls.throttle = 0
                self.client.setCarControls(self.car_controls)
                self.car_moved = False
                self.alert("Stopping the Car ", "INFO")
            # AirSim Default Cameras names
            cameras = [
                "front_center",
                "front_right",
                "front_left",
                "fpv",
                "back_center",
            ]
            # Reading front_center camera
            camera_id = 0
            responses = self.client.simGetImages(
                [airsim.ImageRequest(0, airsim.ImageType.Scene, False, False)]
            )
            # print('Retrieved images: %d', len(responses))
            for response in responses:
                # get numpy array (np.fromstring is deprecated; frombuffer avoids a copy)
                img1d = np.frombuffer(response.image_data_uint8, dtype=np.uint8)
                # reshape the flat array to a 3-channel image array H x W x 3
                img_rgb = img1d.reshape(response.height, response.width, 3)
                # Create Header for the ROS message
                header = Header()
                set_timestamp(header, time.time())
                header.frame_id = cameras[camera_id]
                # Convert Numpy array to ROS message using yonoarc_utils.image
                img_msg = from_ndarray(img_rgb, header)
                # Publish this message on Block's output port
                self.publish("cam_{}".format(camera_id), img_msg)
                camera_id += 1
            # Read Semantic Segmentation Image from Camera 0
            # Create ROS message and publish it like the previous steps
            seg_responses = self.client.simGetImages(
                [airsim.ImageRequest(0, airsim.ImageType.Segmentation, False, False)]
            )
            img1d = np.frombuffer(seg_responses[0].image_data_uint8, dtype=np.uint8)
            img_rgb = img1d.reshape(seg_responses[0].height, seg_responses[0].width, 3)
            header = Header()
            set_timestamp(header, time.time())
            header.frame_id = "front_center"
            img_msg = from_ndarray(img_rgb, header)
            self.publish("sem_segm", img_msg)
            # Read Lidar Data and convert it to PointCloud2 ROS message
            lidarData = self.client.getLidarData()
            if len(lidarData.point_cloud) < 3:
                self.alert("\tNo points received from Lidar data", "WARN")
            else:
                points = self.parse_lidarData(lidarData)
                self.publish("lidar", points)

            try:
                states = self.client.getCarState()
                float_msg = Float32()
                float_msg.data = states.speed
                self.publish("speed", float_msg)
            except Exception as e:
                print(e)
                pass
            # Update Timer
            last_time = time.time()
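
`parse_lidarData` is not shown here; a minimal sketch following AirSim's own examples, which reshape the flat float list into an N x 3 point array (any packaging into a PointCloud2 message is omitted):

    import numpy as np

    def parse_lidarData(self, data):
        # AirSim returns a flat [x0, y0, z0, x1, y1, z1, ...] list.
        points = np.array(data.point_cloud, dtype=np.float32)
        return points.reshape(len(points) // 3, 3)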
Example No. 9
    def run(self):
        ltime = time.time()
        idx = 0
        while True:
            if time.time() - ltime >= 1.0 / self.frequency:
                # Wrap around to replay the sequence from the beginning.
                if idx == len(self.left_img_list):
                    idx = 0
                # RGB Images
                left_img = self.read_image_file(self.left_img_list[idx])
                right_img = self.read_image_file(self.right_img_list[idx])
                header = Header()
                set_timestamp(header, time.time())
                header.frame_id = "left_img"
                left_msg = from_ndarray(left_img, header)
                self.publish("left_img", left_msg)
                header.frame_id = "right_img"
                right_msg = from_ndarray(right_img, header)
                self.publish("right_img", right_msg)
                # Depth Images
                left_depth = self.read_numpy_file(self.left_depth_list[idx])
                left_depth_vis = depth2vis(left_depth)
                header.frame_id = "left_depth"
                left_msg = from_ndarray(left_depth_vis, header)
                self.publish("left_depth", left_msg)
                right_depth = self.read_numpy_file(self.right_depth_list[idx])
                right_depth_vis = depth2vis(right_depth)
                header.frame_id = "right_depth"
                right_msg = from_ndarray(right_depth_vis, header)
                self.publish("right_depth", right_msg)
                # Semantic Segmentation
                left_seg = self.read_numpy_file(self.left_seg_list[idx])
                left_seg_vis = seg2vis(left_seg)
                header.frame_id = "left_segmentation"
                left_msg = from_ndarray(left_seg_vis, header)
                self.publish("left_segmentation", left_msg)
                right_seg = self.read_numpy_file(self.right_seg_list[idx])
                right_seg_vis = seg2vis(right_seg)
                header.frame_id = "right_segmentation"
                right_msg = from_ndarray(right_seg_vis, header)
                self.publish("right_segmentation", right_msg)
                # Left Camera Pose
                pose_stamped = PoseStamped()
                pose_stamped.header = header
                pose_stamped.header.frame_id = "left_camera"
                pose = Pose()
                pose.position.x = self.pose_l[idx][0]
                pose.position.y = self.pose_l[idx][1]
                pose.position.z = self.pose_l[idx][2]
                pose.orientation.x = self.pose_l[idx][3]
                pose.orientation.y = self.pose_l[idx][4]
                pose.orientation.z = self.pose_l[idx][5]
                pose.orientation.w = self.pose_l[idx][6]
                pose_stamped.pose = pose
                self.publish("left_pose", pose_stamped)
                # Right Camera Pose
                pose_stamped = PoseStamped()
                pose_stamped.header = header
                pose_stamped.header.frame_id = "right_camera"
                pose = Pose()
                pose.position.x = self.pose_r[idx][0]
                pose.position.y = self.pose_r[idx][1]
                pose.position.z = self.pose_r[idx][2]
                pose.orientation.x = self.pose_r[idx][3]
                pose.orientation.y = self.pose_r[idx][4]
                pose.orientation.z = self.pose_r[idx][5]
                pose.orientation.w = self.pose_r[idx][6]
                pose_stamped.pose = pose
                self.publish("right_pose", pose_stamped)

                if idx > 0:
                    flow = self.read_numpy_file(self.flow_list[idx - 1])
                    flow_vis = flow2vis(flow)
                    header.frame_id = "optical_flow"
                    left_msg = from_ndarray(flow_vis, header)
                    self.publish("optical_flow", left_msg)
                    flow_mask = self.read_numpy_file(self.flow_mask_list[idx - 1])
                    flow_vis_w_mask = flow2vis(flow, mask=flow_mask)
                    header.frame_id = "optical_flow_mask"
                    right_msg = from_ndarray(flow_vis_w_mask, header)
                    self.publish("optical_flow_mask", right_msg)

                ltime = time.time()
                idx += 1
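
`depth2vis`, `seg2vis`, and `flow2vis` come from the TartanAir tooling; a hedged sketch of what a `depth2vis`-style helper does (clip, normalize, colorize), not the exact upstream implementation:

    import cv2
    import numpy as np

    def depth2vis(depth, max_depth=50.0):
        # Clip far values, scale to 0-255, and apply a colormap so the
        # depth map can be published as an ordinary BGR image.
        depth_u8 = (255.0 * np.clip(depth, 0.0, max_depth) / max_depth).astype(np.uint8)
        return cv2.applyColorMap(depth_u8, cv2.COLORMAP_JET)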
Example No. 10
    def on_new_messages(self, messages):
        ''' [Optional] Called according to the execution mode of the block.

        Parameters
        ----------
        messages : dict
            A dictionary of the port keys and the values of the incoming messages.

        '''
        with self.graph.as_default():
            image = to_ndarray(messages['image'])
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            imH, imW, _ = image.shape
            image_resized = cv2.resize(image_rgb, (self.width, self.height))
            input_data = np.expand_dims(image_resized, axis=0)
            if self.floating_model:
                input_data = (np.float32(input_data) -
                              self.input_mean) / self.input_std

            self.interpreter.set_tensor(
                self.input_details[0]['index'], input_data)

            start_time = time.time()
            self.interpreter.invoke()
            stop_time = time.time()

            boxes = self.interpreter.get_tensor(
                self.output_details[0]['index'])[0]
            classes = self.interpreter.get_tensor(
                self.output_details[1]['index'])[0]
            scores = self.interpreter.get_tensor(
                self.output_details[2]['index'])[0]

            for i in range(len(scores)):
                if self.min_conf_threshold < scores[i] <= 1.0:

                    # Get bounding box coordinates and draw the box. The
                    # interpreter can return coordinates outside the image
                    # dimensions, so clamp them with max() and min().
                    ymin = int(max(1, (boxes[i][0] * imH)))
                    xmin = int(max(1, (boxes[i][1] * imW)))
                    ymax = int(min(imH, (boxes[i][2] * imH)))
                    xmax = int(min(imW, (boxes[i][3] * imW)))

                    cv2.rectangle(image, (xmin, ymin),
                                  (xmax, ymax), (10, 255, 0), 2)

                    # Draw label
                    # Look up object name from "labels" array using class index
                    object_name = self.labels[int(classes[i])]
                    label = '%s: %d%%' % (object_name, int(
                        scores[i]*100))  # Example: 'person: 72%'
                    labelSize, baseLine = cv2.getTextSize(
                        label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)  # Get font size
                    # Make sure not to draw label too close to top of window
                    label_ymin = max(ymin, labelSize[1] + 10)
                    # Draw white box to put label text in
                    cv2.rectangle(image, (xmin, label_ymin-labelSize[1]-10), (
                        xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED)
                    cv2.putText(image, label, (xmin, label_ymin-7),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)  # Draw label text
            img_msg = from_ndarray(image, messages['image'].header)
            self.publish("out_img", img_msg)
            print("Detection Took {}".format(stop_time-start_time))
Example No. 11
    def on_new_messages(self, messages):
        # Overlay the classification result on the frame and republish it.
        labeled_image = write_classification(to_ndarray(messages['image']),
                                             messages['label'])
        self.publish("labeled_image",
                     from_ndarray(labeled_image, messages['image'].header))