Code example #1
    def get_ult_colors_from_image(self):
        """Get ultimate charge number colors from this frame.

        Author:
            Rigel

        Args:
            None

        Returns:
            @ult_color: dict with keys "left"/"right"; -1: white number, 1: black number
        """
        left_pre_pos = OW.get_ult_charge_color_pre_pos(True)[self.game.game_type]
        left_pre_image = ImageUtils.rgb_to_gray(ImageUtils.crop(self.image, left_pre_pos))
        left_shear = ImageUtils.shear(left_pre_image, OW.get_tf_shear(True)[self.game.game_type])
        left_pos = OW.get_ult_charge_color_pos(True)[self.game.game_type]
        left_image = ImageUtils.crop(left_shear, left_pos)
        left_image_g = ImageUtils.contrast_adjust_log(left_image, OW.ULT_ADJUST_LOG_INDEX)
        left_bin = ImageUtils.binary_otsu(left_image_g)

        right_pre_pos = OW.get_ult_charge_color_pre_pos(False)[self.game.game_type]
        right_pre_image = ImageUtils.rgb_to_gray(ImageUtils.crop(self.image, right_pre_pos))
        right_shear = ImageUtils.shear(right_pre_image, OW.get_tf_shear(False)[self.game.game_type])
        right_pos = OW.get_ult_charge_color_pos(False)[self.game.game_type]
        right_image = ImageUtils.crop(right_shear, right_pos)
        right_image_g = ImageUtils.contrast_adjust_log(right_image, OW.ULT_ADJUST_LOG_INDEX)
        right_bin = ImageUtils.binary_otsu(right_image_g)
        return {
            "left": np.sign(2 * np.sum(left_bin) - np.size(left_bin)),
            "right": np.sign(2 * np.sum(right_bin) - np.size(right_bin))
        }
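
The return dict is a majority vote over the binarized digit pixels: assuming ImageUtils.binary_otsu yields a 0/1 image, np.sign(2 * np.sum(bin) - np.size(bin)) is 1 when foreground pixels outnumber background and -1 otherwise. A minimal sketch of the trick:

    import numpy as np

    def majority_sign(binary):
        # +1 if more than half the pixels are 1, -1 if fewer, 0 on an exact tie
        return int(np.sign(2 * np.sum(binary) - np.size(binary)))

    print(majority_sign(np.array([[1, 1], [1, 0]])))  # 1: mostly foreground
    print(majority_sign(np.array([[0, 1], [0, 0]])))  # -1: mostly background
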
Code example #2
    def get_living_status(self, avatar_ref):
        """Retrieves chara living status for current player.

        If the chara is dead, the overall variation of avatar brightness drops
        below that of the reference.

        Author:
            Appcell

        Args:
            avatar_ref: reference avatar image

        Returns:
            None 
        """
        if self.is_observed:
            avatar = ImageUtils.crop(
                self.image,
                OW.get_avatar_pos(self.index)[self.frame.game.game_type])
        else:
            avatar = ImageUtils.crop(
                self.image,
                OW.get_avatar_pos_small(self.index)[self.frame.game.game_type])
        brightness = np.mean(avatar, 2)
        brightness_ref = np.mean(avatar_ref, 2)
        variation = brightness.max() - brightness.min()
        variation_ref = brightness_ref.max() - brightness_ref.min()

        # TODO: write consts here into ow.py
        if abs(variation_ref - variation) > 45:
            self.is_dead = True
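
The liveness test boils down to comparing peak-to-peak brightness ranges: a dead avatar is greyed out, so its brightness range is much flatter than the reference's. A standalone sketch of the same check (45 is the hard-coded constant flagged in the TODO above):

    import numpy as np

    def brightness_variation(img):
        # peak-to-peak range of per-pixel brightness, averaged over RGB
        gray = np.mean(img, axis=2)
        return gray.max() - gray.min()

    def looks_dead(avatar, avatar_ref, threshold=45):
        return abs(brightness_variation(avatar_ref)
                   - brightness_variation(avatar)) > threshold
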
Code example #3
File: frame.py Project: Rigel7/OverwatchDataAnalysis
    def validate(self):
        """Validate this frame, set up Game obj if it's not set.

        Validation by:
        1) Test if there's any players detectable. If none, frame is invalid
        2) Test if top-right corner is white. If not, frame is invalid
        If frame is valid and Game info (i.e. team colors, avatars) are not
        set, set them up.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        flag = False
        for player in self.players:
            if player.is_dead is False:
                flag = True

        validation_roi = ImageUtils.crop(
            self.image, OW.FRAME_VALIDATION_POS[self.game.game_type])

        std = np.max([
            np.std(validation_roi[:, :, 0]),
            np.std(validation_roi[:, :, 1]),
            np.std(validation_roi[:, :, 2])
        ])

        mean = [
            np.mean(validation_roi[:, :, 0]),
            np.mean(validation_roi[:, :, 1]),
            np.mean(validation_roi[:, :, 2])
        ]

        if std < OW.FRAME_VALIDATION_COLOR_STD[self.game.game_type] \
                and np.mean(mean) > OW.FRAME_VALIDATION_COLOR_MEAN[self.game.game_type] \
                and flag is True:
            self.is_valid = True

        replay_icon = ImageUtils.crop(
            self.image,
            OW.get_replay_icon_pos()[self.game.game_type])

        replay_match_result = cv2.matchTemplate(replay_icon,
                                                self.game.replay_icon_ref,
                                                cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(replay_match_result)
        if max_val < OW.FRAME_VALIDATION_REPLAY_PROB[self.game.game_type]:
            self.is_valid = True

        if self.is_valid is True and self.game.team_colors is None:
            self.game.set_team_colors(self)
            self.game.avatars_ref = self._get_avatars_before_validation()
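
The top-right-corner test reduces to "is this ROI flat and bright": every channel has a low standard deviation and the overall mean is high. Factored out, assuming a 3-channel ROI:

    import numpy as np

    def roi_is_white(roi, std_limit, mean_limit):
        # flat: no channel varies much; bright: the channel means are high
        std = max(np.std(roi[:, :, c]) for c in range(3))
        mean = np.mean([np.mean(roi[:, :, c]) for c in range(3)])
        return std < std_limit and mean > mean_limit
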
Code example #4
 def crop_image(self, img, mask, bbox, kp, vis, sfm_pose):
     # crop image and mask and translate kps
     img = image_utils.crop(img, bbox, bgval=1)
     mask = image_utils.crop(mask, bbox, bgval=0)
     kp[vis, 0] -= bbox[0]
     kp[vis, 1] -= bbox[1]
     sfm_pose[1][0] -= bbox[0]
     sfm_pose[1][1] -= bbox[1]
     return img, mask, kp, sfm_pose
Code example #5
 def crop_image(self, img, mask, bbox, kp, vis, sfm_pose):
     # crop image and mask and translate kps
     img = image_utils.crop(img, bbox, bgval=1)
     mask = image_utils.crop(mask, bbox, bgval=0)
     # subtract x1, y1 to translate the coordinate system to (0, 0)
     kp[vis, 0] -= bbox[0]
     kp[vis, 1] -= bbox[1]
     sfm_pose[1][0] -= bbox[0]
     sfm_pose[1][1] -= bbox[1]
     return img, mask, kp, sfm_pose
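
Subtracting the bbox origin moves the keypoints and the SfM translation into the cropped image's coordinate system. A worked example, assuming bbox is laid out as [x1, y1, x2, y2] as the indexing above suggests:

    import numpy as np

    bbox = [40, 30, 200, 180]                  # x1, y1, x2, y2
    kp = np.array([[50., 35.], [120., 90.]])   # (x, y) in original image coords
    vis = np.array([True, True])               # visibility mask

    kp[vis, 0] -= bbox[0]                      # x coords become 10., 80.
    kp[vis, 1] -= bbox[1]                      # y coords become  5., 60.
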
Code example #6
    def get_chara(self):
        """Retrieves chara name for current player in current frame.

        Compare cropped avatar with reference avatars, pick the best match as 
        the chara current player plays with. In OWL, currently observed player
        has a larger avatar. To differentiate between the two, comparison has
        to run twice and the better match gets chosen.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        all_avatars = self.frame.get_avatars(self.index)
        avatars_ref = all_avatars["normal"]
        avatars_small_ref = all_avatars["small"]

        # Crop avatar from frame
        avatar = ImageUtils.crop(
            self.image,
            OW.get_avatar_pos(self.index)[self.frame.game.game_type])
        avatar_small = ImageUtils.crop(
            avatar, [1, avatar.shape[0] - 4, 0, avatar.shape[1]])
        score = 0
        for (name, avatar_ref) in avatars_ref.iteritems():

            s = cv2.matchTemplate(avatar, avatar_ref, cv2.TM_CCOEFF_NORMED)
            _, s, _, _ = cv2.minMaxLoc(s)
            s_small = cv2.matchTemplate(avatar_small, avatars_small_ref[name],
                                        cv2.TM_CCOEFF_NORMED)
            _, s_small, _, _ = cv2.minMaxLoc(s_small)

            s_final = s if s > s_small else s_small

            if s_final > score:
                score = s_final
                self.chara = name
                self.is_observed = s > s_small

        if self.chara is None:
            self.chara = "empty"
            self.is_dead = True
            return

        self.get_living_status(avatars_ref[self.chara])
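
At its core this is an argmax over reference templates. A generic sketch of the pattern, where refs is a hypothetical dict mapping chara names to reference images no larger than image and of the same dtype:

    import cv2

    def best_match(image, refs):
        # return the name whose TM_CCOEFF_NORMED peak is highest
        best_name, best_score = None, -1.0
        for name, ref in refs.items():
            scores = cv2.matchTemplate(image, ref, cv2.TM_CCOEFF_NORMED)
            _, score, _, _ = cv2.minMaxLoc(scores)
            if score > best_score:
                best_name, best_score = name, score
        return best_name, best_score
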
Code example #7
    def get_ult_status(self):
        """Retrieves ultimate statues info for current player in current frame.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        # Crop icon from current frame
        ult_icon_pos = OW.get_ult_icon_pos(
            self.index)[self.frame.game.game_type]
        ult_icon = ImageUtils.crop(self.image, ult_icon_pos)
        # Get reference icon image
        ult_icon_ref = OW.get_ult_icon_ref(
            self.index)[self.frame.game.game_type]
        # Convert both to grayscale for comparison
        ult_icon_ref, ult_icon = ImageUtils.rgb_to_gray(
            ult_icon_ref), ImageUtils.rgb_to_gray(ult_icon)

        # Compare cropped icon with reference, get the probability of them
        # being similar
        prob_map = cv2.matchTemplate(ult_icon, ult_icon_ref,
                                     cv2.TM_CCOEFF_NORMED)
        prob_map_cropped = prob_map[0:(ult_icon.shape[0] -
                                       ult_icon_ref.shape[0]), :]
        _, prob, _, loc = cv2.minMaxLoc(prob_map_cropped)

        # To avoid possible explosion effect.
        # When ult gets ready, brightness of icon goes above limit.
        brightness = np.mean(ult_icon)
        deviation = np.std(ult_icon)

        if brightness > OW.ULT_ICON_MAX_BRIGHTNESS[self.frame.game.game_type] \
            and deviation < OW.ULT_ICON_MAX_DEVIATION[self.frame.game.game_type]:
            prob = 1
            self.is_ult_ready = True
            return

        temp_ult_icon = ImageUtils.crop(
            ult_icon,
            [loc[1], ult_icon_ref.shape[0], loc[0], ult_icon_ref.shape[1]])
        prob_ssim = measure.compare_ssim(temp_ult_icon,
                                         ult_icon_ref,
                                         multichannel=False)

        if prob > OW.ULT_ICON_MAX_PROB[self.frame.game.game_type]:
            if prob_ssim > OW.ULT_ICON_MAX_PROB_SSIM[
                    self.frame.game.game_type]:
                self.is_ult_ready = True
Code example #8
File: landmark.py Project: zhaoyin214/cv_lab_pytorch
    def __getitem__(self, index: int) -> Sample:

        # image
        image_filepath = os.path.join(self._root, self._image_list[index])
        image = cv2.imread(image_filepath)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        h, w = image.shape[:2]

        # roi
        bbox = self._bbox_list[index].copy()
        # expanding
        bbox = self._expand(bbox, (w, h))
        # cropping roi
        image = crop(image, bbox)

        # landmarks
        landmarks = self._landmark_list[index].copy()
        landmarks_wrapper = LandmarksWrapper(landmarks)
        # 0-1 normalization
        landmarks_wrapper.norm_01(bbox)

        sample = {"image": image, "landmarks": landmarks_wrapper.landmarks}

        if self._transform:
            sample = self._transform(sample)

        return sample
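
norm_01 presumably maps absolute landmark coordinates into [0, 1] relative to the expanded bbox so they stay valid after the crop. A guess at that behavior, assuming bbox is (x1, y1, x2, y2):

    import numpy as np

    def norm_01(landmarks, bbox):
        # map absolute (x, y) landmarks into [0, 1] relative to the bbox
        x1, y1, x2, y2 = bbox
        out = np.asarray(landmarks, dtype=float).copy()
        out[:, 0] = (out[:, 0] - x1) / (x2 - x1)
        out[:, 1] = (out[:, 1] - y1) / (y2 - y1)
        return out
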
Code example #9
def preprocess_image(img_path, img_size=256):
    img = io.imread(img_path) / 255.
    # Scale the max image size to be img_size
    #- here the image size gets scaled to 257
    scale_factor = float(img_size) / np.max(img.shape[:2])
    img, _ = img_util.resize_img(img, scale_factor)  #256x256x3

    # Crop img_size x img_size from the center
    #-------------- Not obvious why it crops again: the crop comes out as
    #-------------- 257x257 while the resize above targets 256x256 - is it
    #-------------- trying to trick us!??
    #-------------- The crop cuts a bounding box outward from the center point
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # img center in (x, y)
    center = center[::-1]

    bbox = np.hstack([center - img_size / 2., center + img_size / 2.])

    img = img_util.crop(img, bbox, bgval=1.)  #257x257x3

    # Transpose the image to 3xHxW
    img = np.transpose(img, (2, 0, 1))  #3x257x257

    return img
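
The 257x257 shapes that puzzled the comments follow from the bbox arithmetic: with img_size = 256 the bbox runs from center - 128 to center + 128, and a crop that keeps both endpoints covers 257 pixels per side. For example:

    import numpy as np

    img_size = 256
    center = np.array([128, 128])   # (x, y) image center after resizing
    bbox = np.hstack([center - img_size / 2., center + img_size / 2.])
    # bbox == [0., 0., 256., 256.]; if img_util.crop keeps both endpoints,
    # the result is 257 px per side, matching the shapes noted above.
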
Code example #10
File: classifier_model.py Project: marvintxd/cmr
def preprocess_image(img_path, img_size=256):
    img = io.imread(img_path) / 255.

    # if grayscale, convert to RGB
    if len(img.shape) == 2:
        img = np.repeat(np.expand_dims(img, 2), 3, axis=2)

    # Scale the max image size to be img_size
    scale_factor = float(img_size) / np.max(img.shape[:2])
    img, _ = img_util.resize_img(img, scale_factor)

    # Crop img_size x img_size from the center
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # img center in (x, y)
    center = center[::-1]
    bbox = np.hstack([center - img_size / 2., center + img_size / 2.])

    img = img_util.crop(img, bbox, bgval=1.)

    # Transpose the image to 3xHxW
    img = np.transpose(img, (2, 0, 1))

    # necessary preprocessing for resnet
    img = torch.tensor(img, dtype=torch.float)
    img = resnet_transform(img)

    # random flip
    if np.random.rand(1) > 0.5:
        img = torch.flip(img, (2,))

    return img
Code example #11
    def get_images(self, orig_image: OrthographicImage):
        image = clone(orig_image)

        draw_around_box(image, box=self.box)
        background_color = image.value_from_depth(
            get_distance_to_box(image, self.box))

        mat_image_resized = cv2.resize(image.mat, self.size_resized)

        mat_images = []
        for a in self.a_space:
            rot_mat = cv2.getRotationMatrix2D(
                (self.size_resized[0] / 2, self.size_resized[1] / 2),
                a * 180.0 / np.pi, 1.0)
            rot_mat[:, 2] += [(self.size_rotated[0] - self.size_resized[0]) / 2,
                              (self.size_rotated[1] - self.size_resized[1]) / 2]
            dst_depth = cv2.warpAffine(mat_image_resized,
                                       rot_mat,
                                       self.size_rotated,
                                       borderValue=background_color)
            mat_images.append(crop(dst_depth, self.size_cropped))

        mat_images = np.array(mat_images) / np.iinfo(image.mat.dtype).max
        if len(mat_images.shape) == 3:
            mat_images = np.expand_dims(mat_images, axis=-1)

        # mat_images = 2 * mat_images - 1.0
        return mat_images
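
The rot_mat[:, 2] adjustment shifts the affine transform so the rotated image lands centered in the larger canvas instead of being clipped at the original bounds. Isolated, with made-up sizes:

    import cv2
    import numpy as np

    src_size = (64, 64)   # (width, height) before rotation
    dst_size = (96, 96)   # larger canvas, so rotated corners are not clipped
    rot = cv2.getRotationMatrix2D((src_size[0] / 2, src_size[1] / 2), 30.0, 1.0)
    # translate the rotation centre into the middle of the bigger canvas
    rot[:, 2] += [(dst_size[0] - src_size[0]) / 2,
                  (dst_size[1] - src_size[1]) / 2]
    rotated = cv2.warpAffine(np.zeros((64, 64), np.uint8), rot, dst_size)
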
Code example #12
    def __init__(self, frame, index):
        """Initialize a Killfeed object.

        Author:
            Appcell

        Args:
            frame: the Frame obj current killfeed is in
            index: row number of current killfeed, ranges from 0 to 5.

        Returns:
            None 
        """
        self.player1 = {
            "chara": "empty",
            "player": "empty",
            "team": "empty",
            "pos": -1
        }
        self.player2 = {
            "chara": "empty",
            "player": "empty",
            "team": "empty",
            "pos": -1
        }
        self.ability = 0
        self.assists = []
        self.index = index
        self.is_valid = True
        self.is_headshot = False
        self.frame = frame
        self.game_type = frame.game.game_type

        killfeed_pos = OW.get_killfeed_pos(index)[frame.game.game_type]
        killfeed_with_gap_pos = OW.get_killfeed_with_gap_pos(index)[
            frame.game.game_type]
        self.image = ImageUtils.crop(frame.image, killfeed_pos)
        self.image_with_gap = ImageUtils.crop(
            frame.image, killfeed_with_gap_pos)


        self.get_players()
        self.get_ability_and_assists()
        self.get_headshot()
        self.free()
Code example #13
def get_image_roi(wc):
    config = get_config()
    imgd = get_absolute_path(config.get('KWS', 'images'))
    imgs = glob(os.path.join(imgd, '*.jpg'))
    dids = [os.path.basename(x).replace('.jpg', '') for x in imgs]
    imgp = imgs[dids.index(wc.doc_id)]
    img = imread(imgp)
    path = get_svg_path(wc)
    poly = path2polygon(path)
    return crop(img, poly)
Code example #14
    def grasp_convert(self, action: Action,
                      images: List[OrthographicImage]) -> None:
        image = images[0]
        mat_area_image = get_area_of_interest_new(image,
                                                  action.pose,
                                                  border_color=np.nan,
                                                  project_3d=False,
                                                  flags=cv2.INTER_NEAREST).mat

        mat_area_image = mat_area_image.astype(np.float32)
        mat_area_image[mat_area_image < 10 * 255] = np.nan  # make not-found pixels NaN

        # Get distance at gripper for possible collisions
        gripper_one_side_size = 0.5 * image.pixel_size * (action.pose.d + 0.002)  # [px]
        area_center = crop(
            mat_area_image,
            (image.pixel_size * 0.012, image.pixel_size * 0.012))
        side_gripper_image_size = (image.pixel_size * 0.025,
                                   image.pixel_size * 0.025)
        area_left = crop(mat_area_image, side_gripper_image_size,
                         (-gripper_one_side_size, 0))
        area_right = crop(mat_area_image, side_gripper_image_size,
                          (gripper_one_side_size, 0))

        # Z is positive!
        z_raw = image.depth_from_value(np.nanmedian(area_center))
        if np.isnan(z_raw):  # 'is np.NaN' would miss computed NaNs
            area_center = crop(
                mat_area_image,
                (image.pixel_size * 0.03, image.pixel_size * 0.03))
            z_raw = image.depth_from_value(np.nanmedian(area_center))

        z_raw_left = image.depth_from_value(np.nanmin(area_left))
        z_raw_right = image.depth_from_value(np.nanmin(area_right))

        z_raw += self.grasp_z_offset
        z_raw_collision = min(z_raw_left, z_raw_right) - 0.008  # [m]
        z_raw_max = min(
            z_raw, z_raw_collision)  # Get the maximum [m] for impedance mode
        action.pose.z = -z_raw_max
Code example #15
    def place_convert(self, action: Action,
                      images: List[OrthographicImage]) -> None:
        image = images[0]
        mat_area_image = get_area_of_interest_new(image,
                                                  action.pose,
                                                  border_color=np.nan).mat

        mat_area_image = mat_area_image.astype(np.float32)
        mat_area_image[mat_area_image == 0] = np.nan  # make not-found pixels NaN

        # Get distance at gripper for possible collisions
        gripper_one_side_size = 0.5 * image.pixel_size * (action.pose.d + 0.002)  # [px]
        area_center = crop(
            mat_area_image,
            (image.pixel_size * 0.025, image.pixel_size * 0.025))
        side_gripper_image_size = (image.pixel_size * 0.025,
                                   image.pixel_size * 0.025)
        area_left = crop(mat_area_image, side_gripper_image_size,
                         (-gripper_one_side_size, 0))
        area_right = crop(mat_area_image, side_gripper_image_size,
                          (gripper_one_side_size, 0))

        z_raw = image.depth_from_value(np.nanmedian(area_center))
        z_raw_left = image.depth_from_value(np.nanmin(area_left))
        z_raw_right = image.depth_from_value(np.nanmin(area_right))

        if np.isnan(z_raw):  # 'is np.NaN' would miss computed NaNs
            area_center = crop(
                mat_area_image,
                (image.pixel_size * 0.022, image.pixel_size * 0.03))
            z_raw = image.depth_from_value(np.nanmedian(area_center))

        z_raw += self.place_z_offset
        z_raw_collision = min(z_raw_left, z_raw_right) - 0.008  # [m]
        # z_raw_max = min(z_raw, z_raw_collision)  # Get the maximum [m] for impedance mode
        z_raw_max = z_raw

        action.pose.z = -z_raw_max  # [m] Move slightly up for gripper center point
Code example #16
    def shift_convert(self, action: Action,
                      images: List[OrthographicImage]) -> None:
        image = images[0]
        mat_area_image = get_area_of_interest_new(image,
                                                  action.pose,
                                                  border_color=np.nan).mat

        mat_area_image = mat_area_image.astype(np.float32)
        mat_area_image[mat_area_image == 0] = np.nan  # make not-found pixels NaN

        # Get distance at gripper for possible collisions
        area_center = crop(mat_area_image,
                           (image.pixel_size * 0.01, image.pixel_size * 0.03))

        z_raw = image.depth_from_value(np.nanmax(area_center))
        z_raw += self.shift_z_offset
        action.pose.z = -z_raw  # [m] Move slightly up for gripper center point
Code example #17
File: demo.py Project: neka-nat/cmr
def preprocess_image(img_path, img_size=256):
    img = io.imread(img_path) / 255.

    # Scale the max image size to be img_size
    scale_factor = float(img_size) / np.max(img.shape[:2])
    img, _ = img_util.resize_img(img, scale_factor)

    # Crop img_size x img_size from the center
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)
    # img center in (x, y)
    center = center[::-1]
    bbox = np.hstack([center - img_size / 2., center + img_size / 2.])

    img = img_util.crop(img, bbox, bgval=1.)

    # Transpose the image to 3xHxW
    img = np.transpose(img, (2, 0, 1))

    return img
Code example #18
def detect_gesture(pd, tracker, img, action='activate'):
    # Inference
    _, t, status, box = pd.predict(img)
    print('Gesture detection estimated time {:.4f}'.format(t / 1000))
    # Calculate result
    ok = False
    # 0: None, 1: lefthand, 2: righthand, 3: both hands
    if action == 'activate' and (status == 1 or status == 2):
        height, width, _ = img.shape
        xmin, ymin = int(box[0] * width), int(box[1] * height)
        xmax, ymax = int(box[2] * width), int(box[3] * height)
        box = np.array([xmin, ymin, xmax, ymax])
        obj_img = image.crop(img, box)
        obj_img = image.resize(obj_img, tracker.input_shape)
        obj_img = np.array(obj_img / 127.5 - 1, dtype=np.float32)
        ok = tracker.set_anchor(obj_img, box)
    if action == 'deactivate' and status == 3:
        ok = True
    # Return
    return ok
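
The detector reports the box in normalized [0, 1] coordinates, so it has to be scaled by the frame size before cropping. The same conversion in isolation:

    import numpy as np

    height, width = 480, 640
    box = [0.25, 0.50, 0.75, 1.00]   # normalized [xmin, ymin, xmax, ymax]
    xmin, ymin = int(box[0] * width), int(box[1] * height)
    xmax, ymax = int(box[2] * width), int(box[3] * height)
    pixel_box = np.array([xmin, ymin, xmax, ymax])   # [160, 240, 480, 480]
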
Code example #19
    def get_ult_status(self):
        """Retrieves ultimate statues info for current player in current frame.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        # Crop icon from current frame
        ult_icon_pos = OW.get_ult_icon_pos(
            self.index)[self.frame.game.game_type]
        ult_icon = ImageUtils.crop(self.image, ult_icon_pos)
        # Get reference icon image
        ult_icon_ref = OW.get_ult_icon_ref(
            self.index)[self.frame.game.game_type]
        # Convert both to grayscale for comparison
        ult_icon_ref, ult_icon = ImageUtils.rgb_to_gray(
            ult_icon_ref), ImageUtils.rgb_to_gray(ult_icon)

        # Compare cropped icon with reference, get the probability of them
        # being similar
        prob = cv2.matchTemplate(ult_icon, ult_icon_ref,
                                 cv2.TM_CCOEFF_NORMED).max()

        # To avoid possible explosion effect.
        # When ult gets ready, brightness of icon goes above limit.
        brightness = np.mean(ult_icon)

        if brightness > OW.ULT_ICON_MAX_BRIGHTNESS[self.frame.game.game_type]:
            prob = 1

        if prob > OW.ULT_ICON_MAX_PROB[self.frame.game.game_type]:
            self.is_ult_ready = True
Code example #20
 def test_crop(self):
     uncropped = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                            [13, 14, 15, 16]]])
     #Test without offset
     croppedTest1 = np.array([[[6, 7], [10, 11]]])
     croppedTest1Crop = (2, 2)
     croppedTest1Offset = (0, 0)
     self.assertTrue(
         np.allclose(
             pre.crop(uncropped, croppedTest1Crop, croppedTest1Offset),
             croppedTest1))
     #Test offset
     croppedTest2 = np.array([[[11, 12], [15, 16]]])
     croppedTest2Crop = (2, 2)
     croppedTest2Offset = (1, 1)
     self.assertTrue(
         np.allclose(
             pre.crop(uncropped, croppedTest2Crop, croppedTest2Offset),
             croppedTest2))
     #Test non-square factor regions and preference for top and left with odd remainders.
     croppedTest3 = np.array([[[2], [6], [10]]])
     croppedTest3Crop = (3, 1)
     croppedTest3Offset = (0, 0)
     self.assertTrue(
         np.allclose(
             pre.crop(uncropped, croppedTest3Crop, croppedTest3Offset),
             croppedTest3))
     with self.assertRaises(ValueError):
         #Fails when crop region is too big.
         croppedTest4Crop = (6, 3)
         croppedTest4Offset = (0, 0)
         pre.crop(uncropped, croppedTest4Crop, croppedTest4Offset)
     #Use a separate context: a second call after the first raise would never
     #execute inside the same assertRaises block.
     with self.assertRaises(ValueError):
         #Fails when offset is too large.
         croppedTest5Crop = (2, 2)
         croppedTest5Offset = (2, 1)
         pre.crop(uncropped, croppedTest5Crop, croppedTest5Offset)
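
Taken together, the assertions pin down pre.crop's semantics: a centered window of the given (rows, cols) size, shifted by the offset, with an odd remainder leaving the window toward the top/left, and ValueError on out-of-range requests. A minimal implementation consistent with these tests (an assumption, not the actual pre.crop):

    import numpy as np

    def crop(arr, size, offset=(0, 0)):
        # centered (rows, cols) window over the last two axes, shifted by
        # offset; an odd remainder leaves the window toward the top/left
        rows, cols = arr.shape[-2], arr.shape[-1]
        ch, cw = size
        if ch > rows or cw > cols:
            raise ValueError('crop region is too big')
        top = (rows - ch) // 2 + offset[0]
        left = (cols - cw) // 2 + offset[1]
        if top < 0 or left < 0 or top + ch > rows or left + cw > cols:
            raise ValueError('offset is too large')
        return arr[..., top:top + ch, left:left + cw]
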
Code example #21
    def _get_icons_weights(self, edge_validation):
        """Get icon comparison scores of row image and icon references.

        Use match template in cv2 to get possible icon and their weights in the
        killfeed image.

        Author:
            Leavebody

        Args:
            edge_validation: A list of booleans. Should be the result of
                _validate_edge()

        Returns:
            A list of dicts of matching results, covering all possible icons
            in this killfeed image. A matching result is in the form of:
            {
                "chara": "empty",   # name of chara, or "empty"
                "prob": "empty",  # score from comparison
                "pos": "empty",    # x-axis position of icon in killfeed
                                     row image
            }
        """
        result = []
        for (chara, icon) in self.frame.game.killfeed_icons_ref.iteritems():
            match_result = cv2.matchTemplate(self.image, icon,
                                             cv2.TM_CCOEFF_NORMED)
            # Find the two most likely locations of this character's icon in the killfeed image.
            # Mask the pixels around the first location to find the second one.
            _, max_val, _, max_loc = cv2.minMaxLoc(match_result)
            # Here we have to allow some error, hence the +/- 2
            if sum(edge_validation[max_loc[0] - 2: max_loc[0] + 2]) > 0 \
                    and max_val > OW.KILLFEED_MAX_PROB[self.game_type]:
                temp_icon = ImageUtils.crop(self.image, [
                    3, icon.shape[0], max_loc[0],
                    OW.KILLFEED_ICON_WIDTH[self.game_type]
                ])
                score_ssim = measure.compare_ssim(temp_icon,
                                                  icon,
                                                  multichannel=True)
                if score_ssim >= OW.KILLFEED_SSIM_THRESHOLD[self.game_type]:
                    result.append({
                        'chara': chara,
                        'prob': max_val,
                        'pos': max_loc[0]
                    })
            half_mask_width = 5
            mask_index_left = max((max_loc[0] + half_mask_width \
                - OW.ABILITY_GAP_NORMAL[self.game_type] \
                - OW.KILLFEED_ICON_WIDTH[self.game_type], 0))
            mask_index_right = min((
                max_loc[0] - half_mask_width + 1 \
                + OW.ABILITY_GAP_NORMAL[self.game_type] \
                + OW.KILLFEED_ICON_WIDTH[self.game_type],
                OW.KILLFEED_WIDTH[self.game_type] \
                - OW.KILLFEED_ICON_WIDTH[self.game_type]))

            match_result_masked = np.array(match_result)  # copy before masking (np.matrix is deprecated)
            match_result_masked[0:match_result_masked.shape[0],
                                mask_index_left:mask_index_right] = -1
            _, max_val2, _, max_loc2 = cv2.minMaxLoc(match_result_masked)

            if sum(edge_validation[max_loc2[0]-2: max_loc2[0]+2]) > 0 \
                    and max_val2 > OW.KILLFEED_MAX_PROB[self.game_type]:
                temp_icon2 = ImageUtils.crop(self.image, [
                    3, icon.shape[0], max_loc2[0],
                    OW.KILLFEED_ICON_WIDTH[self.game_type]
                ])
                score_ssim2 = measure.compare_ssim(temp_icon2,
                                                   icon,
                                                   multichannel=True)
                if score_ssim2 >= OW.KILLFEED_SSIM_THRESHOLD[self.game_type]:
                    result.append({
                        'chara': chara,
                        'prob': max_val2,
                        'pos': max_loc2[0]
                    })

        # Since there's often a 'fake' avatar appearing in the middle, here we
        # have to do some filtering. The idea: if the positions of recognized
        # avatars span a wide range, this killfeed usually contains 2 avatars
        # (killer and killed player). A pair of avatars has a minimum distance
        # requirement: distance >= ABILITY_GAP_NORMAL + OW.KILLFEED_ICON_WIDTH
        # Another possible error: template matching takes an assist icon for a
        # full avatar. This is a bit tricky and my solution is not optimal: I
        # directly remove candidates far from the left/right edges.
        min_pos = 1000
        max_pos = 0
        for chara in result:
            if chara['pos'] < min_pos:
                min_pos = chara['pos']
            if chara['pos'] > max_pos:
                max_pos = chara['pos']

        if len(result) < 2:
            return result

        # calculate distance between each pair of potential recogs, then
        # remove those which are never visited
        result_validation = [False] * len(result)
        if max_pos - min_pos \
            > OW.ABILITY_GAP_NORMAL[self.game_type] + OW.KILLFEED_ICON_WIDTH[self.game_type] - 10:
            for ind1, chara1 in enumerate(result):
                for ind2, chara2 in enumerate(result):
                    if abs(chara1['pos'] - chara2['pos']) \
                        > OW.ABILITY_GAP_NORMAL[self.game_type] \
                        + OW.KILLFEED_ICON_WIDTH[self.game_type] - 10 \
                        or abs(chara1['pos'] - chara2['pos']) < 10:
                        result_validation[ind1] = True
                        result_validation[ind2] = True

        # If 2 charas, remove those far from left/right edges
        result_filtered = []
        for ind, val in enumerate(result_validation):
            if val is True and (abs(result[ind]['pos'] - min_pos) < 15
                                or abs(result[ind]['pos'] - max_pos) < 15):
                result_filtered.append(result[ind])

        return result_filtered
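
The two-peak search is a reusable pattern: take the global maximum of the score map, mask a band of columns around it, then run minMaxLoc again for the runner-up. In isolation:

    import cv2
    import numpy as np

    def two_best_matches(image, icon, min_separation):
        # best and second-best horizontal matches, at least min_separation apart
        scores = cv2.matchTemplate(image, icon, cv2.TM_CCOEFF_NORMED)
        _, val1, _, loc1 = cv2.minMaxLoc(scores)
        masked = np.array(scores)
        lo = max(loc1[0] - min_separation, 0)
        hi = min(loc1[0] + min_separation, scores.shape[1])
        masked[:, lo:hi] = -1   # suppress columns near the first peak
        _, val2, _, loc2 = cv2.minMaxLoc(masked)
        return (val1, loc1), (val2, loc2)
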
Code example #22
def main(imgpath=None, svgpath=None, outputfile=None, retake=True, saveimgs=True):
    print('Word pre-processing')
    config = get_config()

    # create an output file
    if outputfile is None:
        txtp = get_absolute_path(config.get('KWS.features', 'file'))
    else:
        txtp = get_absolute_path(os.path.join(outputfile))

    processed = []
    if retake and os.path.exists(txtp):
        takenext = False
        for line in open(txtp, 'r'):
            line = line.strip()
            if takenext and (len(line) >= 9):
                processed.append(line.strip())
                takenext = False
            elif line == "###":
               takenext = True
    else:
        handle = open(txtp, 'w+')
        for param, value in config.items('KWS.prepro'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        for param, value in config.items('KWS.features'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        handle.write('###' + os.linesep)
        handle.close()

    # get the data
    if svgpath is None:
        svgd = get_absolute_path(config.get('KWS', 'locations'))
    else:
        svgd = get_absolute_path(svgpath)
    svgs = glob(os.path.join(svgd, '*.svg'))

    if imgpath is None:
        imgd = get_absolute_path(config.get('KWS', 'images'))
    else:
        imgd = get_absolute_path(imgpath)
    imgs = glob(os.path.join(imgd, '*.jpg'))

    # parse some parameter
    threshold = float(config.get('KWS.prepro', 'segmentation_threshold'))
    relative_height = float(config.get('KWS.prepro', 'relative_height'))
    skew_resolution = float(config.get('KWS.prepro', 'angular_resolution'))
    primary_peak_height = float(config.get('KWS.prepro', 'primary_peak_height'))
    secondary_peak_height = float(config.get('KWS.prepro', 'secondary_peak_height'))
    window_width = int(config.get('KWS.features', 'window_width'))
    step_size = int(config.get('KWS.features', 'step_size'))
    blocks = int(config.get('KWS.features', 'number_of_blocks'))
    svgs.sort()
    imgs.sort()

    for svgp, imgp in zip(svgs, imgs):
        svgid = os.path.basename(svgp).replace('.svg', '')
        imgid = os.path.basename(imgp).replace('.jpg', '')
        print('\t%s\n\t%s' % (svgp, imgp))

        if svgid != imgid:
            raise IOError('the id\'s of the image file (%s) and the svg file (%s) are not the same' % (svgid, imgid))

        trans = get_transcription(svgid)

        print('\tdoc id: %s' % svgid)
        wids, paths = parse_svg(svgp)
        img = imread(imgp)
        for wid, path in zip(wids, paths):
            print('\tword id: %s' % wid)

            if retake and (processed.count(wid) == 1):
                print('\talready processed')
                continue

            # look up the corresponding word
            if saveimgs:
                imgfile = wid
                word = get_word(wid, data=trans)
                if word is not None:
                    imgfile = word.code2string() + '_' + imgfile
            else:
                imgfile = None

            # get the word image
            poly = path2polygon(path)
            roi = crop(img, poly)

            pre, sym = word_preprocessor(roi,
                                         threshold=threshold,
                                         rel_height=relative_height,
                                         skew_res=skew_resolution,
                                         ppw=primary_peak_height,
                                         spw=secondary_peak_height,
                                         save=imgfile)

            if type(pre) is str:
                print('\tpre-processing failed\n\t\t%s' % pre)
                continue

            fea = compute_features(pre,
                                   window_width=window_width,
                                   step_size=step_size,
                                   blocks=blocks)

            write_word_features(txtp, wid, fea, [pre.shape[0], pre.shape[1], sym])
            print('...')
Code example #23
    def validate(self):
        """Validate this frame, set up Game obj if it's not set.

        Validation by:
        1) Test if there's any players detectable. If none, frame is invalid
        2) Test if top-right corner is white. If not, frame is invalid
        If frame is valid and Game info (i.e. team colors, avatars) are not
        set, set them up.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        flag = False
        for player in self.players:
            if player.is_dead is False:
                flag = True

        if not flag:
            self.is_valid = False
            return
        else:
            self.is_valid = True
        validation_roi = ImageUtils.crop(
            self.image, OW.FRAME_VALIDATION_POS[self.game.game_type])

        std = np.max([
            np.std(validation_roi[:, :, 0]),
            np.std(validation_roi[:, :, 1]),
            np.std(validation_roi[:, :, 2])
        ])

        mean = [
            np.mean(validation_roi[:, :, 0]),
            np.mean(validation_roi[:, :, 1]),
            np.mean(validation_roi[:, :, 2])
        ]


        if std < OW.FRAME_VALIDATION_COLOR_STD[self.game.game_type] \
                and np.mean(mean) > OW.FRAME_VALIDATION_COLOR_MEAN[self.game.game_type] \
                and flag is True:
            self.is_valid = True
        else:
            self.is_valid = False
            return

        replay_icon = ImageUtils.crop(
            self.image,
            OW.get_replay_icon_pos()[self.game.game_type])

        replay_icon_preseason = ImageUtils.crop(
            self.image,
            OW.get_replay_icon_preseason_pos()[self.game.game_type])
        max_val = measure.compare_ssim(replay_icon,
                                       self.game.replay_icon_ref,
                                       multichannel=True)
        max_val_preseason = measure.compare_ssim(replay_icon_preseason,
                                                 self.game.replay_icon_ref,
                                                 multichannel=True)

        # TODO: another situation: after replay effect there might be a blue
        # rectangle remaining on screen.
        max_val = max_val if max_val > max_val_preseason else max_val_preseason
        if max_val < OW.FRAME_VALIDATION_REPLAY_PROB[self.game.game_type]:
            self.is_valid = True
        else:
            self.is_valid = False
            return

        if self.is_valid is True and self.game.team_colors is None:
            self.game.set_team_colors(self)
            self.game.avatars_ref = self._get_avatars_before_validation()
Code example #24
def main(imgpath=None,
         svgpath=None,
         outputfile=None,
         retake=True,
         saveimgs=True):
    print('Word pre-processing')
    config = get_config()

    # create an output file
    if outputfile is None:
        txtp = get_absolute_path(config.get('KWS.features', 'file'))
    else:
        txtp = get_absolute_path(os.path.join(outputfile))

    processed = []
    if retake and os.path.exists(txtp):
        takenext = False
        for line in open(txtp, 'r'):
            line = line.strip()
            if takenext and (len(line) >= 9):
                processed.append(line.strip())
                takenext = False
            elif line == "###":
                takenext = True
    else:
        handle = open(txtp, 'w+')
        for param, value in config.items('KWS.prepro'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        for param, value in config.items('KWS.features'):
            handle.write('%s: %s%s' % (param, value, os.linesep))
        handle.write('###' + os.linesep)
        handle.close()

    # get the data
    if svgpath is None:
        svgd = get_absolute_path(config.get('KWS', 'locations'))
    else:
        svgd = get_absolute_path(svgpath)
    svgs = glob(os.path.join(svgd, '*.svg'))

    if imgpath is None:
        imgd = get_absolute_path(config.get('KWS', 'images'))
    else:
        imgd = get_absolute_path(imgpath)
    imgs = glob(os.path.join(imgd, '*.jpg'))

    # parse some parameter
    threshold = float(config.get('KWS.prepro', 'segmentation_threshold'))
    relative_height = float(config.get('KWS.prepro', 'relative_height'))
    skew_resolution = float(config.get('KWS.prepro', 'angular_resolution'))
    primary_peak_height = float(config.get('KWS.prepro',
                                           'primary_peak_height'))
    secondary_peak_height = float(
        config.get('KWS.prepro', 'secondary_peak_height'))
    window_width = int(config.get('KWS.features', 'window_width'))
    step_size = int(config.get('KWS.features', 'step_size'))
    blocks = int(config.get('KWS.features', 'number_of_blocks'))
    svgs.sort()
    imgs.sort()

    for svgp, imgp in zip(svgs, imgs):
        svgid = os.path.basename(svgp).replace('.svg', '')
        imgid = os.path.basename(imgp).replace('.jpg', '')
        print('\t%s\n\t%s' % (svgp, imgp))

        if svgid != imgid:
            raise IOError(
                'the id\'s of the image file (%s) and the svg file (%s) are not the same'
                % (svgid, imgid))

        trans = get_transcription(svgid)

        print('\tdoc id: %s' % svgid)
        wids, paths = parse_svg(svgp)
        img = imread(imgp)
        for wid, path in zip(wids, paths):
            print('\tword id: %s' % wid)

            if retake and (processed.count(wid) == 1):
                print('\talready processed')
                continue

            # look up the corresponding word
            if saveimgs:
                imgfile = wid
                word = get_word(wid, data=trans)
                if word is not None:
                    imgfile = word.code2string() + '_' + imgfile
            else:
                imgfile = None

            # get the word image
            poly = path2polygon(path)
            roi = crop(img, poly)

            pre, sym = word_preprocessor(roi,
                                         threshold=threshold,
                                         rel_height=relative_height,
                                         skew_res=skew_resolution,
                                         ppw=primary_peak_height,
                                         spw=secondary_peak_height,
                                         save=imgfile)

            if type(pre) is str:
                print('\tpre-processing failed\n\t\t%s' % pre)
                continue

            fea = compute_features(pre,
                                   window_width=window_width,
                                   step_size=step_size,
                                   blocks=blocks)

            write_word_features(txtp, wid, fea,
                                [pre.shape[0], pre.shape[1], sym])
            print('...')
Code example #25
    def get_chara(self):
        """Retrieves chara name for current player in current frame.

        Compare cropped avatar with reference avatars, pick the best match as 
        the chara current player plays with. In OWL, currently observed player
        has a larger avatar. To differentiate between the two, comparison has
        to run twice and the better match gets chosen.

        Author:
            Appcell

        Args:
            None

        Returns:
            None 
        """
        all_avatars = self.frame.get_avatars(self.index)
        avatars_ref = all_avatars["normal"]
        avatars_small_ref = all_avatars["small"]
        team_color = avatars_ref['ana'][0, 0]

        # Crop avatar from frame
        avatar = ImageUtils.crop(
            self.image,
            OW.get_avatar_pos(self.index)[self.frame.game.game_type])
        avatar_small = ImageUtils.crop(
            avatar, [4, avatar.shape[0] - 4, 0, avatar.shape[1]])

        # If player is observed (not sure about this, though)
        avatar_diff = ImageUtils.crop(
            self.image,
            OW.get_avatar_diff_pos(self.index)[self.frame.game.game_type])
        max_diff = 0
        for i in range(avatar_diff.shape[0]):
            for j in range(avatar_diff.shape[1]):
                diff = ImageUtils.color_distance(avatar_diff[i, j], team_color)
                if diff > max_diff:
                    max_diff = diff
        if max_diff < 40 and self.is_ult_ready is False:
            self.is_observed = True
        score = 0
        for (name, avatar_ref) in avatars_ref.iteritems():
            s = cv2.matchTemplate(avatar, avatar_ref, cv2.TM_CCOEFF_NORMED)
            _, s, _, loc1 = cv2.minMaxLoc(s)
            temp_avatar = ImageUtils.crop(
                avatar,
                [loc1[1], avatar_ref.shape[0], loc1[0], avatar_ref.shape[1]])
            s_ssim1 = measure.compare_ssim(temp_avatar,
                                           avatar_ref,
                                           multichannel=True)
            s_small = cv2.matchTemplate(avatar_small, avatars_small_ref[name],
                                        cv2.TM_CCOEFF_NORMED)
            _, s_small, _, loc2 = cv2.minMaxLoc(s_small)
            temp_avatar2 = ImageUtils.crop(avatar_small, [
                loc2[1], avatars_small_ref[name].shape[0], loc2[0],
                avatars_small_ref[name].shape[1]
            ])
            s_ssim2 = measure.compare_ssim(temp_avatar2,
                                           avatars_small_ref[name],
                                           multichannel=True)
            s_ssim = s_ssim1 if s > s_small else s_ssim2
            s_final = s if s > s_small else s_small
            loc = loc1 if s > s_small else loc2

            if s_final * 0.4 + s_ssim * 0.6 > score:
                score = s_final * 0.4 + s_ssim * 0.6
                self.chara = name

        if self.chara is None:
            self.chara = "empty"
            self.is_dead = True
            return

        self.get_living_status(avatars_ref[self.chara])
Code example #26
    def get_ability_and_assists(self):
        """Retrieve info of ability and assisting players in a row

        If distance between 2 avatar icons, width of arrow icon removed, is
        not divisible by width of an assist icon, then an ability icon must
        exist somewhere. Cut it off and compare with all possible ability
        icons to find the best match.

        After removing ability icon & arrow icon, what's left between 2
        avatars must be n assist icons. Cut each from killfeed image, and then
        compare with references. Pick the one with maximum score as result.

        All results are written into self.ability and self.assists.

        Author:
            Appcell

        Args:
            None

        Returns:
            None

        """
        if self.player1['pos'] == -1 or self.player2['pos'] == -1:
            return

        distance = self.player2[
            'pos'] - self.player1['pos'] - OW.KILLFEED_ICON_WIDTH[self.game_type]
        gap = ImageUtils.crop(
            self.image_with_gap,
            [0, self.image_with_gap.shape[0], self.player1['pos'], distance])

        ability_icon = ImageUtils.crop(
            self.image_with_gap,
            OW.get_ability_icon_pos(self.player2['pos'])[self.game_type])

        # The error gets too large with low-quality videos, so use edge
        # detection instead. Honestly it's not the best choice, since it no
        # longer works for non-OWL videos. But then again, for non-OWL videos
        # we expect a better resolution.

        edge_image = cv2.Canny(self.image, 100, 200)

        # Get the "spanned" edge image.
        roi_x_min = self.player1['pos'] + OW.KILLFEED_ICON_WIDTH[self.game_type] + 4
        roi_x_max = self.player2['pos'] - OW.ABILITY_GAP_NORMAL[self.game_type]

        if roi_x_max - roi_x_min < OW.ASSIST_GAP[self.game_type]:
            return

        edge_span = (np.sum(edge_image, 0) / 255)[roi_x_min:roi_x_max]
        edges = list(filter(
            lambda i: edge_span[i] >= self.image.shape[0] * 0.7, 
            range(0, roi_x_max - roi_x_min)))
        if not edges:
            # Assist avatar doesn't exist
            return
        edge = edges[-1]  # The end of assist avatars list
        
        assist_num = int(round(float(edge) / OW.ASSIST_GAP[self.game_type]))
        ability_list = OW.ABILITY_LIST[self.player1['chara']]
        ability_icons_ref = self.frame.game.ability_icons_ref[
            self.player1['chara']]

        if (distance - OW.ABILITY_GAP_NORMAL[self.game_type]) % OW.ASSIST_GAP[self.game_type] > 5:
            # Has ability icon
            max_prob = -10
            for (ind, ability_index) in enumerate(ability_list):
                filtered_icon = self._preprocess_ability_icon(ability_icon)
                score = measure.compare_ssim(
                    filtered_icon, 
                    ability_icons_ref[ind], 
                    multichannel=True)
                if score > max_prob:
                    max_prob = score
                    self.ability = ability_index

            if max_prob < 0.1 and self.player1['chara'] == OW.GENJI:
                self.ability = OW.ABILITY_E

        for i in range(assist_num):
            # TODO: write this into ow.py!
            assist_icon = ImageUtils.crop(
                self.image,
                [
                    OW.ABILITY_ICON_Y_MIN[self.game_type],
                    OW.ASSIST_ICON_HEIGHT[self.game_type],
                    8 + self.player1['pos'] + i * OW.ASSIST_GAP[self.game_type] \
                        + OW.KILLFEED_ICON_WIDTH[self.game_type],
                    OW.ASSIST_ICON_WIDTH[self.game_type]])
            assist = {
                "chara": "empty",
                "player": "empty",
                "team": self.player1['team']
            }
            max_score = -10
            for (chara, icon) in self.frame.game.assist_icons_ref.iteritems():
                score = measure.compare_ssim(assist_icon, 
                                             icon, multichannel=True)
                if score > max_score:
                    max_score = score
                    assist['chara'] = chara

            self.assists.append(self._set_assist_info(assist))
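
The divisibility test is worth a worked example: subtract the ability/arrow gap from the avatar distance, and what remains must be a whole number of assist icons, otherwise an ability icon sits in between. With made-up widths (the real constants live in ow.py):

    ABILITY_GAP_NORMAL = 32   # hypothetical values, for illustration only
    ASSIST_GAP = 26

    distance = 122            # player2['pos'] - player1['pos'] - icon width
    has_ability_icon = (distance - ABILITY_GAP_NORMAL) % ASSIST_GAP > 5
    # (122 - 32) % 26 == 12 > 5  -> an ability icon must be present
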
Code example #27
    def get_ult_charge(self):
        """Retrieves ultimate charge for current player.

        Author:

        Args:
            None

        Returns:
            None
        """
        if self.is_ult_ready:
            self.ult_charge = 100
            return
        if self.is_dead:
            return

        ult_charge_pre_pos = OW.get_ult_charge_pre_pos(
            self.index)[self.frame.game.game_type]
        ult_charge_pre_image = ImageUtils.rgb_to_gray(
            ImageUtils.crop(self.image, ult_charge_pre_pos))

        ult_charge_shear = ImageUtils.shear(
            ult_charge_pre_image,
            OW.get_tf_shear(self.index)[self.frame.game.game_type])

        ult_charges = [0, 0]

        # Here's another thought: we need to find the gap more intellectually,
        # not relying only on fixed position.
        # In detail, after shearing, find the gap by telling if there are more
        # than 2 colors in same column.
        ult_charge_image = ImageUtils.crop(
            ult_charge_shear,
            OW.get_ult_charge_pos(self.index)[self.frame.game.game_type])

        # TODO: I see no difference at all in brightness deviation!!
        # Our contrast adjustment must be seriously problematic. For grayscale
        # img, a simple normalization based on std would do.
        # ult_charge_image_g = ImageUtils.contrast_adjust_log(
        #     ult_charge_image, OW.ULT_ADJUST_LOG_INDEX)
        ult_charge_image_g = ImageUtils.normalize_gray(ult_charge_image)

        # tell if player is observed (more accurate than previous)
        # Here I use another local variable flag_observed, since the global one
        # might be inaccurate
        flag_observed = False
        deviation_row = ult_charge_image_g.max(
            axis=1) - ult_charge_image_g.min(axis=1)
        if deviation_row[2] - deviation_row[0] > \
            OW.ULT_GAP_DEVIATION_LIMIT[self.frame.game.game_type]:
            self.is_observed = True
            flag_observed = True

        # If current player is observed, there's a white dot on right side
        # needs to be removed.
        # TODO: write this into ow.py as well
        if flag_observed is True:
            ult_charge_image_g = ImageUtils.crop(ult_charge_image_g, [
                0, ult_charge_image_g.shape[0], 0,
                ult_charge_image_g.shape[1] - 5
            ])
        width = ult_charge_image_g.shape[1]
        height = ult_charge_image_g.shape[0]

        # Find the gap
        deviation = ult_charge_image_g.max(axis=0) - ult_charge_image_g.min(
            axis=0)
        gap = -1
        for i in range(width - 4, 3, -1):
            if deviation[i-3] - deviation[i] \
                > OW.ULT_GAP_DEVIATION_LIMIT[self.frame.game.game_type] \
                and deviation[i+3] - deviation[i] \
                > OW.ULT_GAP_DEVIATION_LIMIT[self.frame.game.game_type]:
                gap = i
                break

        bg_color = ult_charge_image_g[:, 0].mean()

        if bg_color < 0.6:
            # Dark background
            ult_charge_image_g = ImageUtils.inverse_gray(ult_charge_image_g)
        # No need to switch to BW here.

        if gap == -1:
            # Only one digit
            num = ImageUtils.remove_digit_vertical_edge(
                ult_charge_image_g,
                OW.ULT_GAP_DEVIATION_LIMIT[self.frame.game.game_type],
                ImageUtils.REMOVE_NUMBER_VERTICAL_EDGE_BOTH)
        else:
            # 2 digits
            num_left = ImageUtils.crop(
                ult_charge_image_g,
                [0, ult_charge_image_g.shape[0], 0, gap + 1])
            num_right = ImageUtils.crop(ult_charge_image_g, [
                0, ult_charge_image_g.shape[0], gap,
                ult_charge_image_g.shape[1] - gap
            ])

            if flag_observed is True:
                num_left = ImageUtils.crop(
                    num_left,
                    [0, num_left.shape[0], num_left.shape[1] \
                        - OW.ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type] - 1,
                     OW.ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type]])
                num_right = ImageUtils.crop(num_right, [
                    0, num_left.shape[0], 0, OW.
                    ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type]
                ])
            else:
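                # NOTE: this branch is currently identical to the one above;
                # both crop with the observed number width.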
                num_left = ImageUtils.crop(
                    num_left,
                    [0, num_left.shape[0], num_left.shape[1] \
                        - OW.ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type] - 1,
                     OW.ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type]])
                num_right = ImageUtils.crop(num_right, [
                    0, num_left.shape[0], 0, OW.
                    ULT_CHARGE_NUMBER_WIDTH_OBSERVED[self.frame.game.game_type]
                ])

            # Since cropping also included the slope on the left side,
            # num_left could actually be empty.
            # We also need another recognition method; simple MSE wouldn't
            # work due to error.

        # ult_charge_image_g = ImageUtils.contrast_adjust_log(
        #     ult_charge_image, OW.ULT_ADJUST_LOG_INDEX)

        # for i in (0,1):

        #     cv2.imshow('t', ult_charge_image)
        #     cv2.waitKey(0)
        #     try:
        #         ult_charge_image_binary = ImageUtils.binary_otsu(ult_charge_image_g)

        #     except ValueError:
        #         self.ult_charge = None
        #         return
        #     ult_charge_similarities = np.zeros(11)
        #     for j in range(1 - i, 11-i):
        #         # 1st number can't be 0, 2nd number can't be empty
        #         ult_charge_ref = self.frame.game.ult_charge_numbers_ref[j - i]
        #         ult_charge_similarities[j] = ImageUtils.similarity(ult_charge_ref, ult_charge_image_binary)
        #     ult_charges[i] = np.argmax(ult_charge_similarities)
        #     print ult_charges[i]
        #     if ult_charges[i] == 10:
        #         ult_charges[i] = 0
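        # NOTE: the digit recognition above is commented out, so ult_charges
        # is still [0, 0] here and ult_charge always comes out as 0.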

        self.ult_charge = ult_charges[0] * 10 + ult_charges[1]
        return
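
The gap search above walks columns right to left and flags one whose brightness deviation dips well below both neighbours 3 px away. Extracted as a standalone helper:

    import numpy as np

    def find_digit_gap(gray, limit):
        # rightmost column whose min-max deviation drops below both
        # neighbours 3 px to either side; -1 when no gap is found
        deviation = gray.max(axis=0) - gray.min(axis=0)
        for i in range(gray.shape[1] - 4, 3, -1):
            if (deviation[i - 3] - deviation[i] > limit
                    and deviation[i + 3] - deviation[i] > limit):
                return i
        return -1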