Example no. 1
    def _get_all_models_info(self):
        """ This function is used to gather all the agents, obstacles, bots
        spawn on the track. So that the exact location is gathered and icons are drawn
        overlaying on top of it. Thus making it prominent.

        Returns:
            (list, list): model_names (Names of agent, bots, obstacles)
                          model_imgs (The appropriate image icons for these)
        """
        model_names, model_imgs = list(), list()
        # Adding all agents to the list
        for i, racecar_info in enumerate(self.racecars_info):
            model_names.append(racecar_info['name'])
            model_imgs.append(utils.get_image(TrackAssetsIconographicPngs.AGENTS_PNG.value[i],
                                              IconographicImageSize.AGENTS_IMAGE_SIZE.value))

        # Adding obstacles to the list
        num_obstacles = int(rospy.get_param("NUMBER_OF_OBSTACLES", 0))
        if num_obstacles:
            for i in range(num_obstacles):
                model_names.append("obstacle_{}".format(i))
                model_imgs.append(utils.get_image(TrackAssetsIconographicPngs.OBSTACLES_PNG.value,
                                                  IconographicImageSize.OBSTACLE_IMAGE_SIZE.value))

        # Adding bot cars to the list
        num_bots = int(rospy.get_param("NUMBER_OF_BOT_CARS", 0))
        if num_bots:
            for i in range(num_bots):
                model_names.append("bot_car_{}".format(i))
                model_imgs.append(utils.get_image(TrackAssetsIconographicPngs.BOTS_PNG.value,
                                                  IconographicImageSize.BOT_CAR_IMAGE_SIZE.value))
        return model_names, model_imgs

    def _plot_track_on_gradient(self, gradient_img_path):
        """ Overlay the track iconographic image on the given gradient; the result is then
        applied to every camera frame. Previously this compositing was done on the top camera
        view, which changes every frame. Since the track iconographic image is static, baking
        the track into the gradient once is more efficient.

        Arguments:
            gradient_img_path (str): Gradient image path

        Returns:
            (Image): Gradient image with the track image composited onto it
        """
        gradient_img = utils.get_image(
            gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        gradient_img = cv2.cvtColor(gradient_img, cv2.COLOR_RGBA2BGRA)

        track_icongraphy_scaled = utils.resize_image(self.track_icongraphy_img,
                                                     SCALE_RATIO)
        track_icongraphy_alpha = track_icongraphy_scaled[:, :, 3] / 255.0

        # Track image is placed at the bottom right with some offset (only in leaderboard tracks)
        x_min = -(self.track_loc_offset[1] + track_icongraphy_scaled.shape[0])
        x_max = gradient_img.shape[0] - self.track_loc_offset[1]
        y_min = -(self.track_loc_offset[0] + track_icongraphy_scaled.shape[1])
        y_max = gradient_img.shape[1] - self.track_loc_offset[0]

        # This is used as the offset for plotting the agent dots
        self.track_start_loc = (gradient_img.shape[1] + y_min,
                                gradient_img.shape[0] + x_min)

        for channel in range(0, 4):
            gradient_img[x_min:x_max, y_min:y_max, channel] =\
                (track_icongraphy_alpha * track_icongraphy_scaled[:, :, channel]) + \
                (1 - track_icongraphy_alpha) * (gradient_img[x_min:x_max, y_min:y_max, channel])
        return gradient_img

    def __init__(self, racecar_name, racecars_info, race_type):
        """ This class is used for head-to-head racing, where there is more than one agent
        Args:
            racecar_name (str): The agent name with the 45-degree camera view
            racecars_info (list): Information for all the agents
            race_type (str): The type of race. This is used to know whether it is a race or an evaluation
        """
        self.racecar_name = racecar_name
        self.racecars_info = racecars_info
        self.race_type = race_type
        # init cv bridge
        self.bridge = CvBridge()
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        gradient_img_path = TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG.value
        self.gradient_img = utils.get_image(
            gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0

        self.mp4_video_metrics_srv_list = list()
        for racecar_info in self.racecars_info:
            agent_name = 'agent' if len(
                racecars_info) == 1 else "agent_{}".format(
                    racecar_info['name'].split("_")[1])
            rospy.wait_for_service("/{}/{}".format(agent_name,
                                                   "mp4_video_metrics"))
            self.mp4_video_metrics_srv_list.append(
                ServiceProxyWrapper(
                    "/{}/{}".format(agent_name, "mp4_video_metrics"),
                    VideoMetricsSrv))

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecars_info)

    def __init__(self, total_sectors):
        """ Initialize the Finish state with a finish time that controls how long to display the checkered flag

        Args:
            total_sectors (int): total number of sectors
        """
        LOG.info("[virtual event]: video edit state at {}".format(self))
        self._icon_image = utils.get_image(
            VirtualEventIconographicPngs.FINISH.value)
        self._icon_image = cv2.cvtColor(self._icon_image, cv2.COLOR_RGBA2BGRA)
        self._sectors = [
            SECTOR_X_FORMAT.format(idx + 1) for idx in range(total_sectors)
        ]

    def __init__(self, racecar_info, race_type):
        """ Initialize the required data for the head-to-bot and time-trial races. This is used for a single agent
        Arguments:
            racecar_info (list): list of dicts with the agent's information
            race_type (str): The type of race; this class is reused for all the different race types
        """
        self.racecar_info = racecar_info
        self.race_type = race_type
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
        self.gradient_img = utils.get_image(
            gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0

        # Connect to the agent metrics service
        rospy.wait_for_service("/agent/mp4_video_metrics")
        self.mp4_video_metrics_srv = ServiceProxyWrapper(
            "/agent/mp4_video_metrics", VideoMetricsSrv)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecar_info)
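
The metrics lookup above goes through the project's ServiceProxyWrapper helper. A minimal sketch of the same call using plain rospy, assuming the /agent/mp4_video_metrics service and the VideoMetricsSrv/VideoMetricsSrvRequest types used throughout these examples (the srv import path below is an assumption):

import rospy
# Assumed import path for the service definitions referenced in these examples.
from deepracer_simulation_environment.srv import VideoMetricsSrv, VideoMetricsSrvRequest


def get_agent_metrics(agent_name="agent"):
    """Block until the metrics service is available, then query it once."""
    service_name = "/{}/mp4_video_metrics".format(agent_name)
    rospy.wait_for_service(service_name)
    metrics_proxy = rospy.ServiceProxy(service_name, VideoMetricsSrv)
    # The response carries fields read later in these examples, e.g. lap_counter,
    # reset_counter, throttle, total_evaluation_time, and done.
    return metrics_proxy(VideoMetricsSrvRequest())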

    def _edit_major_cv_image(self, major_cv_image):
        """ Apply all the editing for the major 45-degree camera image
        Args:
            major_cv_image (Image): Image straight from the camera
        Returns:
            Image: Edited main camera image
        """
        # Applying gradient to whole major image and then writing text
        major_cv_image = utils.apply_gradient(major_cv_image,
                                              self.gradient_img,
                                              self.gradient_alpha)

        # Query the agent metrics service for each racecar
        mp4_video_metrics_info = list()
        for racecar_info, mp4_video_metrics_srv in zip(
                self.racecars_info, self.mp4_video_metrics_srv_list):
            mp4_video_metrics = mp4_video_metrics_srv(VideoMetricsSrvRequest())
            mp4_video_metrics_info.append(mp4_video_metrics)

        # Adding display name to the image
        agents_speed = 0
        agent_done = False
        for i, racecar_info in enumerate(self.racecars_info):
            loc_x, loc_y = XYPixelLoc.MULTI_AGENT_DISPLAY_NAME_LOC.value[i][
                0], XYPixelLoc.MULTI_AGENT_DISPLAY_NAME_LOC.value[i][1]
            # Display name (Racer name/Model name)
            display_name = racecar_info['display_name']
            display_name_txt = display_name if len(
                display_name) < 15 else "{}...".format(display_name[:15])
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=display_name_txt,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_regular_20px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            # Lap Counter
            loc_y += 30
            total_laps = rospy.get_param("NUMBER_OF_TRIALS", 0)
            current_lap = int(mp4_video_metrics_info[i].lap_counter) + 1
            lap_counter_text = "{}/{}".format(current_lap, total_laps)
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=lap_counter_text,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_heavy_30px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            # Reset counter
            loc_y += 45
            reset_counter_text = "Reset | {}".format(
                mp4_video_metrics_info[i].reset_counter)
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=reset_counter_text,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_light_18px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            if self.racecar_name == racecar_info['name']:
                agents_speed = mp4_video_metrics_info[i].throttle
            # The race is complete when the current lap equals the total laps and the done flag is set
            agent_done = agent_done or (mp4_video_metrics_info[i].done and
                                        (current_lap == int(total_laps)))

        # Speed
        loc_x, loc_y = XYPixelLoc.SPEED_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.SPEED_LEADERBOARD_LOC.value
        speed_text = "{} m/s".format(
            utils.get_speed_formatted_str(agents_speed))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=speed_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Leaderboard name
        if self.is_league_leaderboard:
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=self.leaderboard_name,
                loc=XYPixelLoc.LEADERBOARD_NAME_LOC.value,
                font=self.amazon_ember_regular_16px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)

        # Evaluation type
        loc_x, loc_y = XYPixelLoc.RACE_TYPE_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.RACE_TYPE_RACE_LOC.value
        race_text = "race" if self.is_racing else "evaluation"
        evaluation_type_txt = "{} {}".format(
            RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=evaluation_type_txt,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_italic_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # total_evaluation_time (Race time)
        loc_x, loc_y = XYPixelLoc.MULTI_AGENT_EVAL_TIME.value
        total_eval_milli_seconds = mp4_video_metrics_info[
            0].total_evaluation_time
        time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
        total_eval_time_text = "Race | {}".format(
            utils.milliseconds_to_timeformat(time_delta))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=total_eval_time_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_18px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # AWS DeepRacer watermark at the bottom for the community leaderboard
        if self.is_league_leaderboard:
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=AWS_DEEPRACER_WATER_MARK,
                loc=XYPixelLoc.AWS_DEEPRACER_WATER_MARK_LOC.value,
                font=self.amazon_ember_regular_16px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)

        # Check if the done flag is set and set the banner appropriately
        if agent_done:
            # When the cv2 text is written, it automatically drops the alpha value of the image
            rel_y_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value[
                1] if self.is_league_leaderboard else 0
            major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2RGBA)
            racecomplete_image = utils.get_image(
                TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
                IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
            x_offset = major_cv_image.shape[
                1] - racecomplete_image.shape[1] // 2
            y_offset = major_cv_image.shape[
                0] - RACE_COMPLETE_Y_OFFSET - rel_y_offset - racecomplete_image.shape[
                    0] // 2
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, racecomplete_image, (x_offset, y_offset))

        return major_cv_image
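
The per-channel loop in _plot_track_on_gradient is standard alpha compositing. A minimal NumPy sketch of the same blend, assuming two same-sized 4-channel (BGRA) uint8 images, not the project's own helper:

import numpy as np


def alpha_blend(background, overlay):
    """Blend a BGRA overlay onto a same-sized BGRA background using the overlay's alpha channel."""
    alpha = overlay[:, :, 3] / 255.0  # per-pixel opacity in [0, 1]
    blended = background.astype(np.float32)
    for channel in range(4):
        blended[:, :, channel] = (alpha * overlay[:, :, channel] +
                                  (1 - alpha) * background[:, :, channel])
    return blended.astype(np.uint8)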
Example no. 7
    def __init__(self, current_sector=0):
        """initialize Run state and log

        Args:
            current_sector (int): current sector index 0-based.
        """
        self._total_sectors = int(rospy.get_param("NUM_SECTORS", "3"))
        if self._total_sectors == 0:
            log_and_exit(
                "[virtual event]: Virtual event run state with 0 total sectors. "
                "This needs to be at least 1",
                SIMAPP_SIMULATION_WORKER_EXCEPTION,
                SIMAPP_EVENT_ERROR_CODE_500)

        # current sector index 0 is sector 1, and so on
        self._current_sector = current_sector % self._total_sectors
        self._target_progress = (100.00 / self._total_sectors) * (
            self._current_sector + 1)
        self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 3))
        self._race_duration = int(
            rospy.get_param("RACE_DURATION", DEFAULT_RACE_DURATION)) * 1000

        # VirtualEventBestSectorTime S3 upload instance
        # Use the S3 bucket and prefix stored in the yaml-file environment variables because this
        # is for SimApp use only. For virtual events no S3 bucket and prefix are passed through
        # the yaml file; everything is passed through SQS. For simplicity, reuse the yaml S3
        # bucket and prefix environment variables.
        self._virtual_event_best_sector_time = VirtualEventBestSectorTime(
            bucket=os.environ.get("YAML_S3_BUCKET", ''),
            s3_key=get_s3_key(os.environ.get("YAML_S3_PREFIX", ''),
                              SECTOR_TIME_S3_POSTFIX),
            region_name=os.environ.get("APP_REGION", "us-east-1"),
            local_path=SECTOR_TIME_LOCAL_PATH)

        # Go icon image
        self._icon_image = utils.get_image(
            VirtualEventIconographicPngs.GO.value)
        self._icon_image = cv2.cvtColor(self._icon_image, cv2.COLOR_RGBA2BGRA)

        # init number of sectors to plot
        # example with 3 sectors:
        # if the racer is at sector 1 (idx 0): plot sectors 1, 2, and 3
        # if the racer is at sector 2 (idx 1): plot sector 1
        # if the racer is at sector 3 (idx 2): plot sectors 1 and 2
        num_sectors_to_plot = self._current_sector
        if self._current_sector == 0:
            num_sectors_to_plot = self._total_sectors

        # sector names to plot
        self._sectors = [
            SECTOR_X_FORMAT.format(idx + 1)
            for idx in range(num_sectors_to_plot)
        ]

        # sector format string for best session, best personal, and current personal
        self._best_session_format = SECTOR_TIME_FORMAT_DICT[
            TrackSectorTime.BEST_SESSION]
        self._best_personal_format = SECTOR_TIME_FORMAT_DICT[
            TrackSectorTime.BEST_PERSONAL]
        self._current_personal_format = SECTOR_TIME_FORMAT_DICT[
            TrackSectorTime.CURRENT_PERSONAL]

        LOG.info(
            "[virtual event]: video edit state at {} for sector {} with target progress {}".format(
                self, self._current_sector + 1, self._target_progress))

    def __init__(self, racecar_name, racecars_info, race_type):
        """ This class is used for head-to-head racing, where there is more than one agent
        Args:
            racecar_name (str): The agent name with the 45-degree camera view
            racecars_info (list): Information for all the agents
            race_type (str): The type of race. This is used to know whether it is a race or an evaluation
        """
        self.racecar_name = racecar_name
        self.racecars_info = racecars_info
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        self.race_type = race_type

        # Store the font which we will use to write the phase with
        self.formula1_display_regular_12px = utils.get_font(
            'Formula1-Display-Regular', 12)
        self.formula1_display_regular_14px = utils.get_font(
            'Formula1-Display-Regular', 14)
        self.formula1_display_regular_16px = utils.get_font(
            'Formula1-Display-Regular', 16)
        self.formula1_display_wide_12px = utils.get_font(
            'Formula1-Display-Wide', 12)
        self.formula1_display_bold_16px = utils.get_font(
            'Formula1-Display-Bold', 16)

        self.total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value

        # Default image of top view
        gradient_default_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_DEFAULT_PNG.value
        self.gradient_default_img = self._plot_track_on_gradient(
            gradient_default_img_path)
        self.gradient_default_alpha_rgb_mul, self.one_minus_gradient_default_alpha = utils.get_gradient_values(
            self.gradient_default_img)

        # Midway track gradient
        gradient_midway_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_MIDWAY_PNG.value
        self.gradient_midway_img = self._plot_track_on_gradient(
            gradient_midway_img_path)
        self.gradient_midway_alpha_rgb_mul, self.one_minus_gradient_midway_alpha = utils.get_gradient_values(
            self.gradient_midway_img)

        # Finisher track gradient
        gradient_finisher_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_FINISHERS_PNG.value
        self.gradient_finisher_img = self._plot_track_on_gradient(
            gradient_finisher_img_path)
        self.gradient_finisher_alpha_rgb_mul, self.one_minus_gradient_finisher_alpha = utils.get_gradient_values(
            self.gradient_finisher_img)

        # Top camera gradient
        num_racers = len(self.racecars_info)
        if num_racers <= 8:
            # TODO: Add a one-box image and use it if the number of racers is <= 4.
            gradient_top_camera_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_TOPVIEW_2BOX_PNG.value
        elif num_racers <= 12:
            gradient_top_camera_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_TOPVIEW_3BOX_PNG.value
        else:
            raise Exception(
                "More than 12 racers are not supported for Grand Prix")

        gradient_top_camera_img = utils.get_image(
            gradient_top_camera_img_path,
            IconographicImageSize.FULL_IMAGE_SIZE.value)
        gradient_top_camera_img = cv2.cvtColor(gradient_top_camera_img,
                                               cv2.COLOR_RGBA2BGRA)
        self.gradient_top_camera_alpha_rgb_mul, self.one_minus_gradient_top_camera_alpha = utils.get_gradient_values(
            gradient_top_camera_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecars_info, race_type)
        self.hex_car_colors = [
            val['racecar_color'].split('_')[-1] for val in racecars_info
        ]
        self._racer_color_code_rect_img = list()
        self._racer_color_code_slash_img = list()
        for car_color in self.hex_car_colors:
            # Rectangular png of racers
            racer_color_code_rect = "{}_{}".format(
                TrackAssetsIconographicPngs.F1_AGENTS_RECT_DISPLAY_ICON_PNG.
                value, car_color)
            self._racer_color_code_rect_img.append(
                utils.get_image(
                    racer_color_code_rect, IconographicImageSize.
                    F1_RACER_RECT_DISPLAY_ICON_SIZE.value))
            # Slash png of racers
            racer_color_code_slash = "{}_{}".format(
                TrackAssetsIconographicPngs.F1_AGENTS_SLASH_DISPLAY_ICON_PNG.
                value, car_color)
            racer_color_code_slash_img = utils.get_image(
                racer_color_code_slash,
                IconographicImageSize.F1_RACER_SLASH_DISPLAY_ICON_SIZE.value)
            self._racer_color_code_slash_img.append(
                cv2.cvtColor(racer_color_code_slash_img, cv2.COLOR_RGBA2BGRA))
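
utils.get_gradient_values itself is not shown in these examples. Judging from how its two return values are later fed to utils.apply_gradient, it most likely pre-multiplies the overlay's color channels by its alpha and also returns the complementary (1 - alpha) term, so the per-frame blend reduces to a multiply-add. A hedged sketch under that assumption (not the project's actual implementation):

import numpy as np


def get_gradient_values(gradient_img):
    """Pre-compute the alpha-weighted terms of a BGRA gradient overlay (assumed behaviour)."""
    alpha = gradient_img[:, :, 3] / 255.0
    # Overlay color channels already scaled by their opacity.
    gradient_alpha_rgb_mul = gradient_img[:, :, :3] * alpha[:, :, np.newaxis]
    # Weight to apply to the camera frame underneath.
    one_minus_gradient_alpha = 1.0 - alpha
    return gradient_alpha_rgb_mul, one_minus_gradient_alpha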
Example no. 9
    def _get_all_models_info(self):
        """ This function is used to gather all the agents, obstacles, bots
        spawn on the track. So that the exact location is gathered and icons are drawn
        overlaying on top of it. Thus making it prominent.

        Returns:
            (dict): "object_imgs" - Appropriate image icons for boxes and bots
                    "agent_imgs" - Appropriate image icons for agents
                    "agent_num_imgs" - Appropriate image icons for agents number
        """
        object_imgs = list()
        agent_imgs = list()
        agent_num_imgs = list()
        # Adding obstacles to the list
        num_obstacles = int(rospy.get_param("NUMBER_OF_OBSTACLES", 0))
        if num_obstacles:
            # Other agents also come as object_locations, so first plot all the obstacles
            # and then overlay agents on top.
            for i in range(num_obstacles):
                object_imgs.append(
                    utils.get_image(
                        TrackAssetsIconographicPngs.OBSTACLES_PNG.value,
                        IconographicImageSize.OBSTACLE_IMAGE_SIZE.value))

        # Adding bot cars to the list
        num_bots = int(rospy.get_param("NUMBER_OF_BOT_CARS", 0))
        if num_bots:
            # Other agents also come as object_locations, so first plot all the obstacles
            # and then overlay agents on top.
            for i in range(num_bots):
                object_imgs.append(
                    utils.get_image(
                        TrackAssetsIconographicPngs.BOTS_PNG.value,
                        IconographicImageSize.BOT_CAR_IMAGE_SIZE.value))

        # Adding all agents to the list
        agents_png = TrackAssetsIconographicPngs.VIRTUAL_EVENT_AGENTS_PNG.value \
            if self._is_virtual_event else TrackAssetsIconographicPngs.AGENTS_PNG.value
        agent_size = IconographicImageSize.VIRTUAL_EVENT_AGENTS_IMAGE_SIZE.value \
            if self._is_virtual_event else IconographicImageSize.AGENTS_IMAGE_SIZE.value
        for i, _ in enumerate(self.racecars_info):
            # F1 race type: use the colored round icon plus the racer-number icon
            if self.is_f1_race_type:
                hex_car_color = self.racecars_info[i]['racecar_color'].split(
                    '_')[-1]
                racer_number = int(self.racecars_info[i]['name'].split("_")[1])
                agents_round_img = "{}_{}".format(
                    TrackAssetsIconographicPngs.F1_AGENTS_PNG.value,
                    hex_car_color)
                agent_imgs.append(
                    utils.get_image(
                        agents_round_img,
                        IconographicImageSize.F1_AGENTS_IMAGE_SIZE.value))
                agent_number_img_name = "{}_{}".format(
                    TrackAssetsIconographicPngs.F1_AGENTS_NUM_PNG.value,
                    racer_number + 1)
                agent_num_imgs.append(
                    utils.get_image(
                        agent_number_img_name,
                        IconographicImageSize.F1_AGENTS_IMAGE_SIZE.value))
            else:
                agent_imgs.append(
                    utils.get_image(
                        TrackAssetsIconographicPngs.AGENTS_PNG.value[i % 2],
                        IconographicImageSize.AGENTS_IMAGE_SIZE.value))
        return {
            ModelImgNames.OBJECT_IMGS.value: object_imgs,
            ModelImgNames.AGENT_IMGS.value: agent_imgs,
            ModelImgNames.AGENT_NUM_IMGS.value: agent_num_imgs
        }
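
For F1 races the branch above builds per-racer asset names from the car color and the racer index. The same naming logic, pulled out as a standalone sketch with hypothetical placeholder constants standing in for the enum values:

F1_AGENTS_PNG = "f1_agents"          # hypothetical placeholder for TrackAssetsIconographicPngs.F1_AGENTS_PNG.value
F1_AGENTS_NUM_PNG = "f1_agents_num"  # hypothetical placeholder for TrackAssetsIconographicPngs.F1_AGENTS_NUM_PNG.value


def f1_agent_icon_names(racecar_info):
    """Return (round_icon, number_icon) asset names for one F1 racer."""
    hex_car_color = racecar_info['racecar_color'].split('_')[-1]
    racer_number = int(racecar_info['name'].split('_')[1])
    round_icon = "{}_{}".format(F1_AGENTS_PNG, hex_car_color)
    number_icon = "{}_{}".format(F1_AGENTS_NUM_PNG, racer_number + 1)
    return round_icon, number_icon


# Example: {'name': 'racecar_0', 'racecar_color': 'car_purple'} -> ('f1_agents_purple', 'f1_agents_num_1')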
Example no. 10
    def _edit_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
        """ Apply all the editing for the major 45-degree camera image
        Args:
            major_cv_image (Image): Image straight from the camera
            mp4_video_metrics_info (list): Video metrics for each agent, as returned by the mp4_video_metrics service
        Returns:
            Image: Edited main camera image
        """
        # Applying gradient to whole major image and then writing text
        major_cv_image = utils.apply_gradient(major_cv_image,
                                              self.gradient_alpha_rgb_mul,
                                              self.one_minus_gradient_alpha)

        # Top left location of the picture
        loc_x, loc_y = XYPixelLoc.SINGLE_AGENT_DISPLAY_NAME_LOC.value

        # Display name (Racer name/Model name)
        display_name = self.racecar_info[self.racecar_index]['display_name']
        display_name_txt = display_name if len(
            display_name) < 15 else "{}...".format(display_name[:15])
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=display_name_txt,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Lap Counter
        loc_y += 30
        total_laps = rospy.get_param("NUMBER_OF_TRIALS", 0)
        current_lap = min(
            int(mp4_video_metrics_info[self.racecar_index].lap_counter) + 1,
            total_laps)
        lap_counter_text = "{}/{}".format(current_lap, total_laps)
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=lap_counter_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_heavy_30px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # total_evaluation_time (Race time)
        loc_y += 45
        total_eval_milli_seconds = mp4_video_metrics_info[
            self.racecar_index].total_evaluation_time
        time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
        total_eval_time_text = "Race | {}".format(
            utils.milliseconds_to_timeformat(time_delta))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=total_eval_time_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_18px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Reset counter
        loc_y += 25
        reset_counter_text = "Reset | {}".format(
            mp4_video_metrics_info[self.racecar_index].reset_counter)
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=reset_counter_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_18px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Speed
        loc_x, loc_y = XYPixelLoc.SPEED_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.SPEED_LEADERBOARD_LOC.value
        speed_text = "{} m/s".format(
            utils.get_speed_formatted_str(
                mp4_video_metrics_info[self.racecar_index].throttle))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=speed_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Leaderboard name
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.LEADERBOARD_NAME_LOC.value
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=self.leaderboard_name,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_regular_16px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
        # Evaluation type
        loc_x, loc_y = XYPixelLoc.RACE_TYPE_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.RACE_TYPE_RACE_LOC.value
        race_text = "race" if self.is_racing else "evaluation"
        evaluation_type_txt = "{} {}".format(
            RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=evaluation_type_txt,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_italic_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # AWS DeepRacer watermark at the bottom for the community leaderboard
        if self.is_league_leaderboard:
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=AWS_DEEPRACER_WATER_MARK,
                loc=XYPixelLoc.AWS_DEEPRACER_WATER_MARK_LOC.value,
                font=self.amazon_ember_regular_16px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)

        # Check if the done flag is set and set the banner appropriately
        if mp4_video_metrics_info[self.racecar_index].done and (
                int(total_laps) >= current_lap):
            # When the cv2 text is written, it automatically drops the alpha value of the image
            rel_y_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value[
                1] if self.is_league_leaderboard else 0
            racecomplete_image = utils.get_image(
                TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
                IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
            x_offset = major_cv_image.shape[
                1] - racecomplete_image.shape[1] // 2
            y_offset = major_cv_image.shape[
                0] - RACE_COMPLETE_Y_OFFSET - rel_y_offset - racecomplete_image.shape[
                    0] // 2
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, racecomplete_image, (x_offset, y_offset))
        major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2BGRA)
        return major_cv_image

    def __init__(self, racecar_name, racecar_info, race_type):
        """ Initialize the required data for the head-to-bot and time-trial races. This is used for a single agent
        Arguments:
            racecar_name (str): racecar name as a string
            racecar_info (list): list of dicts with the agent's information
            race_type (str): The type of race; this class is reused for all the different race types
        """
        self._world_name = rospy.get_param("WORLD_NAME")
        self.num_sectors = int(rospy.get_param("NUM_SECTORS", "3"))
        # race duration in milliseconds
        self.race_duration = int(
            rospy.get_param("RACE_DURATION", DEFAULT_RACE_DURATION)) * 1000
        self.racecar_info = racecar_info
        self.race_type = race_type
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_28px = utils.get_font(
            'AmazonEmber-Regular', 28)
        self.amazon_ember_regular_14px = utils.get_font(
            'AmazonEmber-Regular', 14)

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = VirtualEventXYPixelLoc.TRACK_IMG_VIRTUAL_EVENT_LOC.value
        self._track_x_min = None
        self._track_x_max = None
        self._track_y_min = None
        self._track_y_max = None

        # Gradient overlay image with track and virtual event mock
        gradient_img_path = VirtualEventIconographicPngs.OVERLAY_PNG.value
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)

        # Time remaining text
        loc_x, loc_y = VirtualEventXYPixelLoc.TIME_REMAINING_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="TIME REMAINING",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # Speed text
        loc_x, loc_y = VirtualEventXYPixelLoc.SPEED_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="m/s",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # Reset text
        loc_x, loc_y = VirtualEventXYPixelLoc.RESET_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="RESET",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # current lap time text
        loc_x, loc_y = VirtualEventXYPixelLoc.CURRENT_LAP_TIME_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="CURRENT LAP TIME",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # best lap time text
        loc_x, loc_y = VirtualEventXYPixelLoc.BEST_LAP_TIME_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="BEST LAP TIME",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # Pre-compute the gradient alpha values
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(
            self.gradient_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov,
            top_camera_info.padding_pct,
            top_camera_info.image_width,
            top_camera_info.image_height,
            racecar_info,
            is_virtual_event=True)

        # virtual event image editing state machine
        self._image_edit_fsm = FSM(initial_state=VirtualEventWaitState())
        # If the best sector time download from S3 failed, leave the best sector times
        # unset (None) and do not display the sector color
        self._sector_times = {}

        # declare sector images
        self._sectors_img_dict = {}
        for idx in range(self.num_sectors):
            sector = SECTOR_X_FORMAT.format(idx + 1)
            sector_color_img_dict = utils.init_sector_img_dict(
                world_name=self._world_name, sector=sector)
            self._sectors_img_dict[sector] = sector_color_img_dict

        # Use the S3 bucket and prefix stored in the yaml-file environment variables because this
        # is for SimApp use only. For virtual events no S3 bucket and prefix are passed through
        # the yaml file; everything is passed through SQS. For simplicity, reuse the yaml S3
        # bucket and prefix environment variables.
        self._virtual_event_best_sector_time = VirtualEventBestSectorTime(
            bucket=os.environ.get("YAML_S3_BUCKET", ''),
            s3_key=get_s3_key(os.environ.get("YAML_S3_PREFIX", ''),
                              SECTOR_TIME_S3_POSTFIX),
            region_name=os.environ.get("APP_REGION", "us-east-1"),
            local_path=SECTOR_TIME_LOCAL_PATH)
        self._sector_times.update(
            self._virtual_event_best_sector_time.get_sector_time(
                num_sectors=self.num_sectors))

        # Initialize the default best personal and current personal times to infinity
        for idx in range(self.num_sectors):
            sector = SECTOR_X_FORMAT.format(idx + 1)
            self._sector_times[SECTOR_TIME_FORMAT_DICT[
                TrackSectorTime.BEST_PERSONAL].format(sector)] = float("inf")
            self._sector_times[SECTOR_TIME_FORMAT_DICT[
                TrackSectorTime.CURRENT_PERSONAL].format(sector)] = float(
                    "inf")

        self._curr_lap_time = 0
        self._last_eval_time = 0
        self._curr_progress = 0
        self._last_progress = 0
        self._current_lap = 1

        # Initializing the fader behaviour to pre-compute the gradient values
        final_fading_image = utils.get_image(
            VirtualEventIconographicPngs.FINAL_FADING_IMAGE_50ALPHA.value,
            IconographicImageSize.FULL_IMAGE_SIZE.value)
        final_fading_image = cv2.cvtColor(final_fading_image,
                                          cv2.COLOR_RGBA2BGRA)
        self._fader_obj = Fader(
            final_fading_image,
            fading_min_percent=VirtualEventFader.FADING_MIN_PERCENT.value,
            fading_max_percent=VirtualEventFader.FADING_MAX_PERCENT.value,
            num_frames=VirtualEventFader.NUM_FRAMES.value)
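
The race clock strings in these examples come from utils.milliseconds_to_timeformat applied to a datetime.timedelta. A minimal sketch of such a formatter, assuming an MM:SS.mmm display (the project's exact format string is not shown here):

import datetime


def milliseconds_to_timeformat(time_delta):
    """Format a timedelta as MM:SS.mmm (assumed display format)."""
    total_ms = int(time_delta.total_seconds() * 1000)
    minutes, remainder_ms = divmod(total_ms, 60 * 1000)
    seconds, milliseconds = divmod(remainder_ms, 1000)
    return "{:02d}:{:02d}.{:03d}".format(minutes, seconds, milliseconds)


# Example: 83,456 ms -> "01:23.456"
print(milliseconds_to_timeformat(datetime.timedelta(milliseconds=83456)))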
Example no. 12
    def _edit_major_cv_image(self, major_cv_image):
        """ Apply all the editing for the Major 45degree camera image
        Args:
            major_cv_image (Image): Image straight from the camera
        Returns:
            Image: Edited main camera image
        """
        # Applying gradient to whole major image and then writing text
        major_cv_image = utils.apply_gradient(major_cv_image,
                                              self.gradient_img,
                                              self.gradient_alpha)

        # Query the agent metrics service for each racecar
        mp4_video_metrics_info = list()
        for racecar_info, mp4_video_metrics_srv in zip(
                self.racecars_info, self.mp4_video_metrics_srv_list):
            mp4_video_metrics = mp4_video_metrics_srv(VideoMetricsSrvRequest())
            mp4_video_metrics_info.append(mp4_video_metrics)

        # Adding display name to the image
        display_name_loc = [(10, 10), (450, 10)]
        agents_speed = 0
        agent_done = False
        for i, racecar_info in enumerate(self.racecars_info):
            loc_x, loc_y = display_name_loc[i][0], display_name_loc[i][1]
            # Display name (Racer name/Model name)
            display_name = racecar_info['display_name']
            display_name_txt = display_name if len(
                display_name) < 15 else "{}...".format(display_name[:15])
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=display_name_txt,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_regular_20px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            # Lap Counter
            loc_y += 30
            total_laps = rospy.get_param("NUMBER_OF_TRIALS", 0)
            lap_counter_text = "{}/{}".format(
                int(mp4_video_metrics_info[i].lap_counter), total_laps)
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=lap_counter_text,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_heavy_30px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            # Reset counter
            loc_y += 45
            reset_counter_text = "Reset | {}".format(
                mp4_video_metrics_info[i].reset_counter)
            major_cv_image = utils.write_text_on_image(
                image=major_cv_image,
                text=reset_counter_text,
                loc=(loc_x, loc_y),
                font=self.amazon_ember_light_18px,
                font_color=RaceCarColorToRGB.White.value,
                font_shadow_color=RaceCarColorToRGB.Black.value)
            if self.racecar_name == racecar_info['name']:
                agents_speed = mp4_video_metrics_info[i].throttle
            agent_done = agent_done or mp4_video_metrics_info[i].done

        # Speed
        loc_x, loc_y = 10, 420
        speed_text = "{} m/s".format(
            utils.get_speed_formatted_str(agents_speed))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=speed_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Evaluation type
        loc_y += 25
        # TODO - Show text based on whether it's a race or a customer-run evaluation
        race_text = "race"
        evaluation_type_txt = "{} {}".format(
            RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=evaluation_type_txt,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_italic_20px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # total_evaluation_time (Race time)
        loc_x, loc_y = 240, 10
        total_eval_milli_seconds = mp4_video_metrics_info[
            0].total_evaluation_time
        time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
        total_eval_time_text = "Race | {}".format(
            utils.milliseconds_to_timeformat(time_delta))
        major_cv_image = utils.write_text_on_image(
            image=major_cv_image,
            text=total_eval_time_text,
            loc=(loc_x, loc_y),
            font=self.amazon_ember_light_18px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)
        # Check if the done flag is set and set the banner appropriately
        if agent_done:
            # When the cv2 text is written, it automatically drops the alpha value of the image
            major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2RGBA)
            racecomplete_image = utils.get_image(
                TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
                IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
            x_offset = major_cv_image.shape[
                1] - racecomplete_image.shape[1] // 2
            y_offset = major_cv_image.shape[
                0] - 180 - racecomplete_image.shape[0] // 2
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, racecomplete_image, (x_offset, y_offset))

        return major_cv_image
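
Every _edit_major_cv_image variant above truncates long display names with the same inline expression; pulled out as a helper, the behaviour is: names shorter than 15 characters pass through unchanged, anything longer (or exactly 15) keeps its first 15 characters and gains an ellipsis. A small sketch (the helper name is illustrative, not part of the project):

def truncate_display_name(display_name, max_len=15):
    """Return the name unchanged if shorter than max_len, otherwise its first max_len characters plus '...'."""
    if len(display_name) < max_len:
        return display_name
    return "{}...".format(display_name[:max_len])


# Example: "SuperLongRacerName42" -> "SuperLongRacerN..."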