def __init__(self, racecar_name, racecar_info, race_type):
        """ To capture the video of evaluation done during the training phase
        Args:
            racecar_info (dict): Information of the agent
        """
        self.racecar_info = racecar_info
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.training_phase_font = utils.get_font('Amazon_Ember_RgIt', 35)

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value

        # Gradient overlay image
        width, height = IconographicImageSize.FULL_IMAGE_SIZE.value
        image = np.zeros((height, width, 4))
        self.gradient_img = self._plot_track_on_gradient(image)
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(
            self.gradient_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecar_info)
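
# A minimal illustrative sketch (not part of the original class) of how the values
# precomputed by utils.get_gradient_values are typically consumed each frame:
# gradient_alpha_rgb_mul holds the overlay's channels premultiplied by its alpha, and
# one_minus_gradient_alpha holds (1 - alpha), so per-frame compositing needs no division.
import numpy as np

def apply_gradient_overlay(frame, gradient_alpha_rgb_mul, one_minus_gradient_alpha):
    """Alpha-composite a precomputed overlay onto a single video frame (sketch)."""
    blended = frame.astype(np.float64)
    for channel in range(blended.shape[2]):
        # premultiplied overlay channel + frame channel weighted by (1 - alpha)
        blended[:, :, channel] = (gradient_alpha_rgb_mul[:, :, channel] +
                                  blended[:, :, channel] * one_minus_gradient_alpha)
    return blended.astype(np.uint8)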
    def __init__(self, racecar_info):
        """ To capture the video of evaluation done during the training phase
        Args:
            racecar_info (dict): Information of the agent
        """
        self.racecar_info = racecar_info
        # Store the font which we will use to write the phase with
        self.training_phase_font = utils.get_font('Amazon_Ember_RgIt', 35)

        # Subscriber to get the phase of the training (idle, training, evaluation)
        rospy.Subscriber('/agent/training_phase', String,
                         self._training_phase_cb_)

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        #
        # TODO: There is currently no gradient image for training, so no gradient overlay is applied to the image.
        # self.gradient_img = utils.get_image(TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value,
        #                                     IconographicImageSize.FULL_IMAGE_SIZE.value)
        # self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0
        #

        # String indicating the current phase
        self._current_training_phase = 'Initializing'

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecar_info)
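
# Illustrative sketch of helpers hypothetically paired with the subscriber and publisher set
# up above (not taken from the original class): the training-phase callback just caches the
# latest phase string, and an edited OpenCV frame is converted back to a ROS Image message
# with CvBridge before being published on /deepracer/topview_stream.
from cv_bridge import CvBridge

def _training_phase_cb_(self, msg):
    """Cache the latest phase string received on /agent/training_phase."""
    self._current_training_phase = msg.data

def publish_edited_topview(self, edited_cv_image):
    """Convert an OpenCV BGR frame to a ROS Image and publish it (sketch)."""
    bridge = CvBridge()
    self.edited_topview_pub.publish(bridge.cv2_to_imgmsg(edited_cv_image, "bgr8"))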
    def __init__(self, racecar_name, racecars_info, race_type):
        """ This class is used for head to head racing where there are more than one agent
        Args:
            racecar_name (str): The agent name with 45degree camera view
            racecars_info (dict): All the agents information
            race_type (str): The type of race. This is used to know if its race type or evaluation
        """
        self.racecar_name = racecar_name
        self.racecars_info = racecars_info
        self.race_type = race_type
        # init cv bridge
        self.bridge = CvBridge()
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        gradient_img_path = TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG.value
        self.gradient_img = utils.get_image(
            gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0

        self.mp4_video_metrics_srv_list = list()
        for racecar_info in self.racecars_info:
            agent_name = 'agent' if len(
                racecars_info) == 1 else "agent_{}".format(
                    racecar_info['name'].split("_")[1])
            rospy.wait_for_service("/{}/{}".format(agent_name,
                                                   "mp4_video_metrics"))
            self.mp4_video_metrics_srv_list.append(
                ServiceProxyWrapper(
                    "/{}/{}".format(agent_name, "mp4_video_metrics"),
                    VideoMetricsSrv))

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecars_info)
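
# Illustrative sketch (assumptions: ServiceProxyWrapper is callable like a rospy.ServiceProxy,
# and VideoMetricsSrvRequest is the auto-generated empty request type that accompanies
# VideoMetricsSrv in its srv module): polling every agent's metrics service once per rendered
# frame using the proxy list built above.
def fetch_agents_metrics(self):
    """Query each agent's mp4_video_metrics service and return the responses (sketch)."""
    return [srv(VideoMetricsSrvRequest()) for srv in self.mp4_video_metrics_srv_list]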
    def __init__(self, racecar_name, racecars_info, race_type):
        """ This class is used for head to head racing where there are more than one agent
        Args:
            racecar_name (str): The agent name with 45degree camera view
            racecars_info (dict): All the agents information
            race_type (str): The type of race. This is used to know if its race type or evaluation
        """
        self.racecar_name = racecar_name
        self.racecars_info = racecars_info
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        self.race_type = race_type

        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")
        self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value if self.is_league_leaderboard \
            else XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value

        gradient_img_path = TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.HEAD_TO_HEAD_OVERLAY_PNG.value
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(
            self.gradient_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecars_info)
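
# A plausible sketch of the _plot_track_on_gradient helper called above (the real
# implementation is elsewhere in the class and may differ): load the gradient overlay PNG at
# full iconographic size, then alpha-composite the track iconography image onto it at
# self.track_loc_offset, so this static background only has to be built once.
import numpy as np

def _plot_track_on_gradient(self, gradient_img_path):
    gradient_img = utils.get_image(gradient_img_path,
                                   IconographicImageSize.FULL_IMAGE_SIZE.value)
    track = self.track_icongraphy_img
    x_offset, y_offset = self.track_loc_offset
    track_height, track_width = track.shape[:2]
    # Region of the gradient image that the track icon covers
    roi = gradient_img[y_offset:y_offset + track_height,
                       x_offset:x_offset + track_width].astype(np.float64)
    alpha = (track[:, :, 3] / 255.0)[:, :, np.newaxis]
    roi[:, :, :3] = alpha * track[:, :, :3] + (1.0 - alpha) * roi[:, :, :3]
    gradient_img[y_offset:y_offset + track_height,
                 x_offset:x_offset + track_width] = roi.astype(np.uint8)
    return gradient_img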
    def __init__(self, racecar_name, racecar_info, race_type):
        """ Initializing the required data for the head to bot, time-trail. This is used for single agent
        Arguments:
            racecars_info (list): list of dict having information of the agent
            race_type (str): Since this class is reused for all the different race_type
        """
        self.racecar_info = racecar_info
        self.race_type = race_type
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")
        self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value if self.is_league_leaderboard \
            else XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value

        # Gradient overlay image
        gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(
            self.gradient_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecar_info)
    def __init__(self, racecar_info, race_type):
        """ Initializing the required data for the head to bot, time-trail. This is used for single agent
        Arguments:
            racecars_info (list): list of dict having information of the agent
            race_type (str): Since this class is reused for all the different race_type
        """
        self.racecar_info = racecar_info
        self.race_type = race_type
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font(
            'AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font(
            'AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font(
            'AmazonEmber-LightItalic', 20)

        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
        self.gradient_img = utils.get_image(
            gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        self.gradient_alpha = self.gradient_img[:, :, 3] / 255.0

        # Subscribing to the agent metrics
        rospy.wait_for_service("/agent/mp4_video_metrics")
        self.mp4_video_metrics_srv = ServiceProxyWrapper(
            "/agent/mp4_video_metrics", VideoMetricsSrv)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecar_info)
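
# Illustrative sketch (not from the original class): this variant keeps only the normalized
# alpha channel (self.gradient_alpha above), so the overlay is composited onto every frame
# with the classic "over" blend, out = alpha * overlay + (1 - alpha) * frame, per channel.
import numpy as np

def blend_overlay(frame, gradient_img, gradient_alpha):
    """Composite an RGBA overlay onto a color frame using its normalized alpha (sketch)."""
    out = frame.astype(np.float64)
    for channel in range(out.shape[2]):
        out[:, :, channel] = (gradient_alpha * gradient_img[:, :, channel] +
                              (1.0 - gradient_alpha) * out[:, :, channel])
    return out.astype(np.uint8)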
    def __init__(self, racecar_name, racecars_info, race_type):
        """ This class is used for head to head racing where there are more than one agent
        Args:
            racecar_name (str): The agent name with 45degree camera view
            racecars_info (dict): All the agents information
            race_type (str): The type of race. This is used to know if its race type or evaluation
        """
        self.racecar_name = racecar_name
        self.racecars_info = racecars_info
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        self.race_type = race_type

        # Store the font which we will use to write the phase with
        self.formula1_display_regular_12px = utils.get_font(
            'Formula1-Display-Regular', 12)
        self.formula1_display_regular_14px = utils.get_font(
            'Formula1-Display-Regular', 14)
        self.formula1_display_regular_16px = utils.get_font(
            'Formula1-Display-Regular', 16)
        self.formula1_display_wide_12px = utils.get_font(
            'Formula1-Display-Wide', 12)
        self.formula1_display_bold_16px = utils.get_font(
            'Formula1-Display-Bold', 16)

        self.total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE",
                                                     "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value

        # Default image of top view
        gradient_default_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_DEFAULT_PNG.value
        self.gradient_default_img = self._plot_track_on_gradient(
            gradient_default_img_path)
        self.gradient_default_alpha_rgb_mul, self.one_minus_gradient_default_alpha = utils.get_gradient_values(
            self.gradient_default_img)

        # Midway track gradient
        gradient_midway_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_MIDWAY_PNG.value
        self.gradient_midway_img = self._plot_track_on_gradient(
            gradient_midway_img_path)
        self.gradient_midway_alpha_rgb_mul, self.one_minus_gradient_midway_alpha = utils.get_gradient_values(
            self.gradient_midway_img)

        # Finisher track gradient
        gradient_finisher_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_FINISHERS_PNG.value
        self.gradient_finisher_img = self._plot_track_on_gradient(
            gradient_finisher_img_path)
        self.gradient_finisher_alpha_rgb_mul, self.one_minus_gradient_finisher_alpha = utils.get_gradient_values(
            self.gradient_finisher_img)

        # Top camera gradient
        num_racers = len(self.racecars_info)
        if num_racers <= 8:
            # TODO: Add a one-box image and use it if the number of racers is <= 4.
            gradient_top_camera_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_TOPVIEW_2BOX_PNG.value
        elif num_racers <= 12:
            gradient_top_camera_img_path = TrackAssetsIconographicPngs.F1_OVERLAY_TOPVIEW_3BOX_PNG.value
        else:
            raise Exception(
                "More than 12 racers are not supported for Grand Prix")

        gradient_top_camera_img = utils.get_image(
            gradient_top_camera_img_path,
            IconographicImageSize.FULL_IMAGE_SIZE.value)
        gradient_top_camera_img = cv2.cvtColor(gradient_top_camera_img,
                                               cv2.COLOR_RGBA2BGRA)
        self.gradient_top_camera_alpha_rgb_mul, self.one_minus_gradient_top_camera_alpha = utils.get_gradient_values(
            gradient_top_camera_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.edited_topview_pub = rospy.Publisher('/deepracer/topview_stream',
                                                  ROSImg,
                                                  queue_size=1)
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov, top_camera_info.padding_pct,
            top_camera_info.image_width, top_camera_info.image_height,
            racecars_info, race_type)
        self.hex_car_colors = [
            val['racecar_color'].split('_')[-1] for val in racecars_info
        ]
        self._racer_color_code_rect_img = list()
        self._racer_color_code_slash_img = list()
        for car_color in self.hex_car_colors:
            # Rectangular png of racers
            racer_color_code_rect = "{}_{}".format(
                TrackAssetsIconographicPngs.F1_AGENTS_RECT_DISPLAY_ICON_PNG.
                value, car_color)
            self._racer_color_code_rect_img.append(
                utils.get_image(
                    racer_color_code_rect, IconographicImageSize.
                    F1_RACER_RECT_DISPLAY_ICON_SIZE.value))
            # Slash png of racers
            racer_color_code_slash = "{}_{}".format(
                TrackAssetsIconographicPngs.F1_AGENTS_SLASH_DISPLAY_ICON_PNG.
                value, car_color)
            racer_color_code_slash_img = utils.get_image(
                racer_color_code_slash,
                IconographicImageSize.F1_RACER_SLASH_DISPLAY_ICON_SIZE.value)
            self._racer_color_code_slash_img.append(
                cv2.cvtColor(racer_color_code_slash_img, cv2.COLOR_RGBA2BGRA))
    def __init__(self, racecar_name, racecar_info, race_type):
        """ Initializing the required data for the head to bot, time-trail. This is used for single agent
        Arguments:
            racecar_name (str): racecar name in string
            racecars_info (list): list of dict having information of the agent
            race_type (str): Since this class is reused for all the different race_type
        """
        self._world_name = rospy.get_param("WORLD_NAME")
        self.num_sectors = int(rospy.get_param("NUM_SECTORS", "3"))
        # race duration in milliseconds
        self.race_duration = int(
            rospy.get_param("RACE_DURATION", DEFAULT_RACE_DURATION)) * 1000
        self.racecar_info = racecar_info
        self.race_type = race_type
        racecar_index = get_racecar_idx(racecar_name)
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_28px = utils.get_font(
            'AmazonEmber-Regular', 28)
        self.amazon_ember_regular_14px = utils.get_font(
            'AmazonEmber-Regular', 14)

        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()

        # Track image offset
        self.track_loc_offset = VirtualEventXYPixelLoc.TRACK_IMG_VIRTUAL_EVENT_LOC.value
        self._track_x_min = None
        self._track_x_max = None
        self._track_y_min = None
        self._track_y_max = None

        # Gradient overlay image with track and virtual event mock
        gradient_img_path = VirtualEventIconographicPngs.OVERLAY_PNG.value
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)

        # Time remaining text
        loc_x, loc_y = VirtualEventXYPixelLoc.TIME_REMAINING_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="TIME REMAINING",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # Speed text
        loc_x, loc_y = VirtualEventXYPixelLoc.SPEED_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="m/s",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # Reset text
        loc_x, loc_y = VirtualEventXYPixelLoc.RESET_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="RESET",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # current lap time text
        loc_x, loc_y = VirtualEventXYPixelLoc.CURRENT_LAP_TIME_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="CURRENT LAP TIME",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # best lap time text
        loc_x, loc_y = VirtualEventXYPixelLoc.BEST_LAP_TIME_TEXT.value
        self.gradient_img = utils.write_text_on_image(
            image=self.gradient_img,
            text="BEST LAP TIME",
            loc=(loc_x, loc_y),
            font=self.amazon_ember_regular_14px,
            font_color=RaceCarColorToRGB.White.value,
            font_shadow_color=RaceCarColorToRGB.Black.value)

        # apply gradient
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(
            self.gradient_img)

        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(
            top_camera_info.horizontal_fov,
            top_camera_info.padding_pct,
            top_camera_info.image_width,
            top_camera_info.image_height,
            racecar_info,
            is_virtual_event=True)

        # virtual event image editing state machine
        self._image_edit_fsm = FSM(initial_state=VirtualEventWaitState())
        # If the best sector time download from S3 failed, initialize the best sector times
        # as None and do not display sector colors
        self._sector_times = {}

        # declare sector images
        self._sectors_img_dict = {}
        for idx in range(self.num_sectors):
            sector = SECTOR_X_FORMAT.format(idx + 1)
            sector_color_img_dict = utils.init_sector_img_dict(
                world_name=self._world_name, sector=sector)
            self._sectors_img_dict[sector] = sector_color_img_dict

        # Use the S3 bucket and prefix for the YAML file stored as environment variables because
        # this is for SimApp use only. For virtual events no S3 bucket and prefix are passed
        # through the YAML file; everything is passed through SQS. For simplicity, reuse the
        # YAML S3 bucket and prefix environment variables.
        self._virtual_event_best_sector_time = VirtualEventBestSectorTime(
            bucket=os.environ.get("YAML_S3_BUCKET", ''),
            s3_key=get_s3_key(os.environ.get("YAML_S3_PREFIX", ''),
                              SECTOR_TIME_S3_POSTFIX),
            region_name=os.environ.get("APP_REGION", "us-east-1"),
            local_path=SECTOR_TIME_LOCAL_PATH)
        self._sector_times.update(
            self._virtual_event_best_sector_time.get_sector_time(
                num_sectors=self.num_sectors))

        # default the best personal and current personal sector times to inf
        for idx in range(self.num_sectors):
            sector = SECTOR_X_FORMAT.format(idx + 1)
            self._sector_times[SECTOR_TIME_FORMAT_DICT[
                TrackSectorTime.BEST_PERSONAL].format(sector)] = float("inf")
            self._sector_times[SECTOR_TIME_FORMAT_DICT[
                TrackSectorTime.CURRENT_PERSONAL].format(sector)] = float(
                    "inf")

        self._curr_lap_time = 0
        self._last_eval_time = 0
        self._curr_progress = 0
        self._last_progress = 0
        self._current_lap = 1

        # Initializing the fader behaviour to pre-compute the gradient values
        final_fading_image = utils.get_image(
            VirtualEventIconographicPngs.FINAL_FADING_IMAGE_50ALPHA.value,
            IconographicImageSize.FULL_IMAGE_SIZE.value)
        final_fading_image = cv2.cvtColor(final_fading_image,
                                          cv2.COLOR_RGBA2BGRA)
        self._fader_obj = Fader(
            final_fading_image,
            fading_min_percent=VirtualEventFader.FADING_MIN_PERCENT.value,
            fading_max_percent=VirtualEventFader.FADING_MAX_PERCENT.value,
            num_frames=VirtualEventFader.NUM_FRAMES.value)
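
# Illustrative sketch (the placement offset below is a placeholder, not a value from the
# original code): how the live countdown next to the static "TIME REMAINING" label rendered
# above could be written onto each frame with the same utils.write_text_on_image signature,
# given the elapsed race time in milliseconds and self.race_duration set in __init__.
def _write_time_remaining(self, frame, elapsed_time_ms):
    remaining_ms = max(self.race_duration - elapsed_time_ms, 0)
    minutes, millis = divmod(int(remaining_ms), 60000)
    text = "{:02d}:{:06.3f}".format(minutes, millis / 1000.0)
    loc_x, loc_y = VirtualEventXYPixelLoc.TIME_REMAINING_TEXT.value
    return utils.write_text_on_image(
        image=frame,
        text=text,
        loc=(loc_x, loc_y + 20),  # placeholder offset below the label
        font=self.amazon_ember_regular_28px,
        font_color=RaceCarColorToRGB.White.value,
        font_shadow_color=RaceCarColorToRGB.Black.value)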