Example 1
def mask_to_gray(img, mask):
    NDARRAY_ASSERT(img, ndim=3, dtype=np.uint8)
    NDARRAY_ASSERT(mask, ndim=2, dtype=bool)
    SAME_SHAPE_ASSERT(img, mask, ignore_ndim=True)
    
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    
    hsv[~mask, 1] = 0  # zero saturation outside the mask
    
    dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    
    return dst
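A quick way to see what mask_to_gray does is to zero the HSV saturation channel outside a mask by hand. This is a minimal sketch of that core step, assuming only OpenCV and NumPy (the project-specific NDARRAY_ASSERT / SAME_SHAPE_ASSERT helpers are not needed for the illustration):

import cv2
import numpy as np

# Synthetic 4x4 BGR image, solid blue
img = np.zeros((4, 4, 3), dtype=np.uint8)
img[:, :] = (255, 0, 0)  # BGR blue

# Keep color only in the top-left 2x2 block
mask = np.zeros((4, 4), dtype=bool)
mask[:2, :2] = True

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv[~mask, 1] = 0  # zero saturation outside the mask
out = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

print(out[0, 0])  # inside the mask: still blue, [255 0 0]
print(out[3, 3])  # outside: desaturated, B == G == R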
def detect_road_damage_1(result, road_mask, logger=None):
    """
    建物被害の結果から道路上の被害抽出を行う
    
    Parameters
    ----------
    result : numpy.ndarray
        建物被害の結果
        黒を背景、白を被害領域として
        2値化されている必要がある
    
    road_mask : numpy.ndarray
        道路マスク画像
        黒を背景、白を道路領域として
        2値化されている必要がある
    
    logger : ImageLogger, default is None
        処理途中の画像をロギングする ImageLogger
    
    Returns
    -------
    numpy.ndarray
        道路上の被害抽出結果
        
    Notes
    -----
    - `result` と `road_mask` は同じ大きさである必要がある
    """

    NDARRAY_ASSERT(result, ndim=2, dtype=bool)
    NDARRAY_ASSERT(road_mask, ndim=2, dtype=bool)
    SAME_SHAPE_ASSERT(result, road_mask)

    # 1. Apply the road mask to the building-damage result

    result_extracted = result * road_mask
    result_extracted = (result_extracted * 255).astype(np.uint8)

    if logger:
        logger.logging_img(result_extracted, "result_extracted")

    # 2. Compute a distance image from the result of step 1
    dist = cv2.distanceTransform(result_extracted, cv2.DIST_L2, maskSize=5)
    # FIXED: Normalize
    # dist = (dist / dist.max() * 255).astype(np.uint8)

    if logger:
        logger.logging_img(dist, "distance", cmap="gray")
        logger.logging_img(dist, "distance_visualized", cmap="jet")

    return dist
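cv2.distanceTransform expects an 8-bit single-channel image and, for each nonzero pixel, returns the distance to the nearest zero pixel, which is why the binary result is scaled to 0/255 uint8 first. A small self-contained check of that behavior (with maskSize=5, OpenCV uses a chamfer approximation, so values are close to but not exactly Euclidean):

import cv2
import numpy as np

# A 5-pixel-tall white stripe on a black background
binary = np.zeros((9, 9), dtype=np.uint8)
binary[2:7, :] = 255

dist = cv2.distanceTransform(binary, cv2.DIST_L2, maskSize=5)

print(dist[4, 4])  # stripe center: ~3 (farthest from the background)
print(dist[2, 4])  # stripe edge:   ~1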
Example 3
def unsharp(img, k=1.0, ksize=3):
    """
    Sharpening (unsharp masking)
    
    Parameters
    ----------
    img : numpy.ndarray
        Input image
    k : int or float
        Sharpening strength
    ksize : int, odd number
        Kernel size; must be an odd number

    Returns
    -------
    numpy.ndarray
        Processed image
    """
    NDARRAY_ASSERT(img, dtype=np.uint8)
    TYPE_ASSERT(k, [int, float])
    TYPE_ASSERT(ksize, int)
    assert ksize % 2 != 0, "'ksize' must be an odd number"
    
    # Every coefficient is -k/ksize^2; the center is raised so that
    # the whole kernel sums to 1 (identity plus k times a high-pass)
    kern = np.full((ksize, ksize), -k / (ksize * ksize), dtype=np.float32)
    kern[ksize // 2, ksize // 2] = 1 + k - k / (ksize * ksize)
    
    return cv2.filter2D(img, cv2.CV_8U, kern)
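The kernel above is built so that its coefficients sum to exactly 1, which is what keeps flat regions unchanged while edges get amplified by k. A quick check:

import numpy as np

k, ksize = 1.0, 3
kern = np.full((ksize, ksize), -k / (ksize * ksize), dtype=np.float32)
kern[ksize // 2, ksize // 2] = 1 + k - k / (ksize * ksize)

# (ksize^2 - 1) * (-k / ksize^2) + (1 + k - k / ksize^2) = 1
print(kern.sum())  # ~1.0 (up to float32 rounding)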
Example 4
    def binalization_labels(self,
                            labels,
                            thresh=0,
                            dtype=np.uint8,
                            max_val=255):
        """
        ラベリング結果の2値化処理
        閾値 thresh 以下の値を 0、閾値 thresh より大きい値を max_val にする
        
        Parameters
        ----------
        labels : numpy.ndarray
            ラベリング結果データ
        thresh : int, default 0
            閾値
        dtype : type, default np.uint8
            2値化結果のデータ型
        max_val : int, default 255
            2値化の際の最大値

        Returns
        -------
        numpy.ndarray
            2値化済みのデータ
        """
        NDARRAY_ASSERT(labels, ndim=2)

        bin_img = np.zeros(labels.shape, dtype=dtype)

        bin_img[labels > thresh] = max_val

        return bin_img
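Stripped of the assertion helper, the method body is a two-line NumPy pattern; a standalone sketch:

import numpy as np

labels = np.array([[0, 1, 2],
                   [0, 3, 0]])

bin_img = np.zeros(labels.shape, dtype=np.uint8)
bin_img[labels > 0] = 255

print(bin_img)
# [[  0 255 255]
#  [  0 255   0]]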
Example 5
def zoom_to_img_size(img, shape):
    """
    画像と同じ大きさになるように拡大する
    
    - `img` を `shape` で指定したサイズに拡大する
    
    Parameters
    ----------
    img : numpy.ndarray
        入力画像
    shape : tuple of int
        画像サイズ

    Returns
    -------
    numpy.ndarray
        拡大された画像
    """
    
    NDARRAY_ASSERT(img)
    TYPE_ASSERT(shape, tuple)
    
    if img.shape[:2] == shape:
        return img
    
    return ndi.zoom(
        img,
        (shape[0] / img.shape[0], shape[1] / img.shape[1]),
        order=0,
        mode='nearest'
    )
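order=0 makes ndi.zoom use nearest-neighbor interpolation, so label/mask images keep their exact values (no interpolated in-between values appear). A minimal sketch:

import numpy as np
import scipy.ndimage as ndi

img = np.array([[1, 2],
                [3, 4]])

zoomed = ndi.zoom(img, (4 / 2, 4 / 2), order=0, mode='nearest')
print(zoomed)
# [[1 1 2 2]
#  [1 1 2 2]
#  [3 3 4 4]
#  [3 3 4 4]]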
Example 6
    def _remove_tiny_area(img, threshold_area=50):
        """
        微小領域の削除
        
        Parameters
        ----------
        img : numpy.ndarray
            入力画像 (8-bit 二値化画像)
        threshold_area : int, default 50
            面積の閾値
        
        Returns
        -------
        thresholded : numpy.ndarray
            閾値以下の面積を持つ領域を除去した画像

        """

        NDARRAY_ASSERT(img, dtype=np.uint8)

        assert check_if_binary_image(img), \
            "argument 'img' must be binary image"

        _, labels, stats, _ = cv2.connectedComponentsWithStats(img)

        areas = stats[:, cv2.CC_STAT_AREA]

        # Vectorized label -> area lookup: map each pixel's label to the
        # area of its connected component
        area_image = areas[labels]

        # Keep foreground components strictly larger than the threshold
        thresholded = (labels != 0) & (area_image > threshold_area)

        return thresholded
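The areas[labels] lookup used above is the standard vectorized way to turn a label image into a per-pixel area image. A self-contained sketch of the whole filtering step:

import cv2
import numpy as np

img = np.zeros((8, 8), dtype=np.uint8)
img[1:3, 1:3] = 255   # 4-pixel blob (tiny)
img[4:8, 4:8] = 255   # 16-pixel blob

_, labels, stats, _ = cv2.connectedComponentsWithStats(img)
areas = stats[:, cv2.CC_STAT_AREA]

kept = (labels != 0) & (areas[labels] > 10)
print(kept[1, 1], kept[5, 5])  # False True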
Example 7
def calc_hsv_metrics_by_ground_truth(img, ground_truth, gt_type="GT_ORANGE"):
    NDARRAY_ASSERT(img, ndim=3, dtype=np.uint8)
    SAME_SHAPE_ASSERT(img, ground_truth, ignore_ndim=True)

    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    ground_truth = gen_ground_truth_by_type(ground_truth, gt_type)

    h, s, v = [hsv[:, :, i][ground_truth] for i in range(3)]

    metrics = {
        k: {
            "min": ch.min(),
            "max": ch.max(),
            "mean": np.mean(ch),
            "median": np.median(ch),
            "stddev": np.std(ch),
            "var": np.var(ch)
        }
        for k, ch in {
            "H": h,
            "S": s,
            "V": v
        }.items()
    }

    return metrics
Example 8
def hsv_blending(bg_img, fg_img, bg_v_scale=None, fg_v_scale=None):
    NDARRAY_ASSERT(fg_img, ndim=3)
    SAME_SHAPE_ASSERT(bg_img, fg_img)
    
    if bg_img.ndim == 3:
        bg_img = cv2.cvtColor(
            bg_img,
            cv2.COLOR_BGR2GRAY
        )
    
    if bg_img.ndim == 2:
        bg_img = cv2.cvtColor(
            bg_img,
            cv2.COLOR_GRAY2BGR
        )
    
    bg_hsv, fg_hsv = [
        cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        for img in [bg_img, fg_img]
    ]
    
    b_h, b_s, b_v = [bg_hsv[:, :, i] for i in range(3)]
    f_h, f_s, f_v = [fg_hsv[:, :, i] for i in range(3)]
    
    if bg_v_scale is not None:
        fg_not_masked = np.all(fg_img == C_BLACK, axis=2)
        
        b_v = b_v.astype(np.float32)
        b_v[fg_not_masked] *= bg_v_scale
        
        # Clip value, then cast back
        b_v[b_v > 255.0] = 255
        b_v = b_v.astype(np.uint8)
    
    if fg_v_scale is not None:
        fg_mask = ~np.all(fg_img == C_BLACK, axis=2)
        
        b_v = b_v.astype(np.float32)
        b_v[fg_mask] *= fg_v_scale
        
        # Clip value, then cast back
        b_v[b_v > 255.0] = 255
        b_v = b_v.astype(np.uint8)
    
    
    dst = cv2.cvtColor(
        np.dstack(
            [f_h, f_s, b_v]
        ),
        cv2.COLOR_HSV2BGR
    )
    
    return dst
Example 9
def evaluation_by_confusion_matrix(result, ground_truth):
    """
    混同行列 (Confusion-Matrix) を用いた
    精度評価
    
    Notes
    -----
    - `result` と `ground_truth` は bool 型で
      同じサイズの行列である必要がある

    Parameters
    ----------
    result : numpy.ndarray
        出力結果
    ground_truth : numpy.ndarray
        正解データ

    Returns
    -------
    tuple
        混同行列と各種スコアのタプル: (confusion_matrix, metrics)
    """
    NDARRAY_ASSERT(result, dtype=bool)
    NDARRAY_ASSERT(ground_truth, dtype=bool)
    SAME_SHAPE_ASSERT(result, ground_truth, ignore_ndim=True)

    # Collapse 3-channel masks to single-channel boolean masks
    if result.ndim == 3:
        result = np.any(result, axis=2)
    if ground_truth.ndim == 3:
        ground_truth = np.any(ground_truth, axis=2)

    TP = np.count_nonzero(result & ground_truth)
    FP = np.count_nonzero(result & ~ground_truth)
    FN = np.count_nonzero(~result & ground_truth)
    TN = np.count_nonzero(~result & ~ground_truth)

    confusion_matrix = {
        "TP": TP,
        "FP": FP,
        "FN": FN,
        "TN": TN,
    }

    metrics = calculate_metrics(confusion_matrix)

    return confusion_matrix, metrics
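calculate_metrics is defined elsewhere in the project; the code in this listing only relies on it returning at least an "F Score" entry. A hypothetical stand-in showing how the usual scores follow from the four counts:

def calculate_metrics_sketch(cm):
    # Hypothetical stand-in for the project's calculate_metrics();
    # "F Score" is the key the search loops in this module read.
    TP, FP, FN = cm["TP"], cm["FP"], cm["FN"]
    precision = TP / (TP + FP) if TP + FP else 0.0
    recall = TP / (TP + FN) if TP + FN else 0.0
    f_score = (2 * precision * recall / (precision + recall)
               if precision + recall else 0.0)
    return {"Precision": precision, "Recall": recall, "F Score": f_score}

print(calculate_metrics_sketch({"TP": 8, "FP": 2, "FN": 4, "TN": 86}))
# precision = 0.8, recall = 0.667, F Score = 0.727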
Example 10
def apply_road_mask(img, mask, bg_value=0):
    NDARRAY_ASSERT(img)
    NDARRAY_ASSERT(mask, ndim=2)
    SAME_SHAPE_ASSERT(img, mask, ignore_ndim=True)

    assert check_if_binary_image(mask), \
        "'mask' must be binary image"

    masked = img.copy()
    if img.ndim == 2:
        # scalar `bg_value` is cast to `masked.dtype` by the assignment
        masked[mask == mask.max()] = bg_value
    else:
        masked[mask == mask.max(), :] = np.full(
            masked.shape[2],
            fill_value=bg_value,
            dtype=masked.dtype
        )

    return masked
Example 11
    def __init__(self, img, logger=None):
        """
        コンストラクタ
        
        Parameters
        ----------
        img : numpy.ndarray
            入力画像
            - グレースケール画像
        logger : ImageLogger, defaults None
            処理途中の画像をロギングする ImageLogger
        """
        super().__init__()

        NDARRAY_ASSERT(img, ndim=2)
        assert img.dtype in [np.uint8, bool], \
            "'img.dtype' must be 'np.uint8' or 'bool'"

        self.BG, self.FG = -1, -1
        self.img = img.copy()

        # FIXED: Normalize
        # if self.img.dtype != np.uint8:
        #     self.img = (img / img.max()).astype(np.uint8)

        if np.unique(self.img).size == 2:
            self.BG, self.FG = np.unique(self.img)

        self.classified = np.full(img.shape[:2],
                                  fill_value=self.LABELS["UNDEFINED"],
                                  dtype=np.uint8)

        self.metrics = {
            "total_length": 0,
            "average_length": 0,
        }

        # Generate Rotated template
        self.TEMPLATE = {
            template_name:
            sum([[np.rot90(template, n_rotate) for n_rotate in range(4)]
                 for template in templates], [])
            for template_name, templates in self.BASE_TEMPLATES.items()
        }

        self.logger = logger
Example 12
def merge_arrays_by_mask(array_1, array_2, mask):
    NDARRAY_ASSERT(mask, ndim=2, dtype=bool)
    SAME_NDIM_ASSERT(array_1, array_2)
    SAME_SHAPE_ASSERT(array_1, array_2)
    SAME_SHAPE_ASSERT(array_1, mask, ignore_ndim=True)
    
    array_1 = array_1.copy()
    array_2 = array_2.copy()
    
    Z = np.zeros(
        (1 if array_1.ndim == 2 else array_1.shape[2],),
        dtype=array_1.dtype
    )

    array_1[mask] = Z
    array_2[~mask] = Z
    
    return array_1 + array_2
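In effect the function zeroes array_1 where the mask is set, zeroes array_2 where it is not, and adds the two, i.e. a select-by-mask. A tiny demonstration:

import numpy as np

a = np.full((2, 2), 10)
b = np.full((2, 2), 99)
mask = np.array([[True, False],
                 [False, True]])

a[mask] = 0    # erase `a` where the mask is set ...
b[~mask] = 0   # ... and keep `b` only there

print(a + b)
# [[99 10]
#  [10 99]]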
Example 13
def _check_arrays(I, M):
    NDARRAY_ASSERT(I, dtype=np.uint8)
    NDARRAY_ASSERT(M, dtype=np.uint8)

    SAME_SHAPE_ASSERT(I, M, ignore_ndim=True)
Example 14
def eval_by_utilize_methods():
    ROOT_DIR_RESULT = "/Users/popunbom/Google Drive/情報学部/研究/修士/最終発表/Thesis/img/result"
    
    SPLIT_PATTERNS = {
        "MEAN_SHIFT"                                      : (0,),
        "ANGLE_VARIANCE"                                  : (1,),
        "PIXEL_CLASSIFY"                                  : (2,),
        "MEAN_SHIFT_AND_ANGLE_VARIANCE"                   : (0, 1),
        "MEAN_SHIFT_AND_PIXEL_CLASSIFY"                   : (0, 2),
        "ANGLE_VARIANCE_AND_PIXEL_CLASSIFY"               : (1, 2),
        "MEAN_SHIFT_AND_ANGLE_VARIANCE_AND_PIXEL_CLASSIFY": (0, 1, 2)
    }
    
    result_paths = sorted([
        join(dir_path, "result.png")
        for (dir_path, dir_names, file_names) in walk(ROOT_DIR_RESULT)
        if "result.png" in file_names
    ])
    
    for result_path in result_paths:
        exp_num, = re.match(
            r".*aerial_roi([0-9]).*",
            result_path
        ).groups()
        
        result_src = imread_with_error(
            result_path
        )
        
        gt_src = imread_with_error(
            join(
                ROOT_DIR_GT,
                f"aerial_roi{exp_num}.png"
            )
        )
        
        result_dir = join(
            dirname(result_path),
            "evaluation"
        )
        if not exists(result_dir):
            makedirs(result_dir)
        
        NDARRAY_ASSERT(result_src, ndim=3)
        
        print("File:", result_path)
        print(
            np.unique(
                result_src.reshape(result_src.shape[0] * result_src.shape[1], result_src.shape[2]),
                axis=0
            )
        )
        
        # Use Vegetation Mask
        OPTIONS = ["NORMAL"]
        
        path_vegetation_mask = join(
            ROOT_DIR_VEG_MASK,
            f"aerial_roi{exp_num}.png"
        )
        vegetation_mask = None
        
        if exists(path_vegetation_mask):
            vegetation_mask = imread_with_error(
                path_vegetation_mask,
                cv2.IMREAD_GRAYSCALE
            ).astype(bool)
            OPTIONS.append("REMOVED_VEGETATION")
            
        
        scores = dict()
        
        for option in OPTIONS:
        
            for name, channels in SPLIT_PATTERNS.items():
                eprint("Evaluation:", name)
                
                result = np.zeros(result_src.shape[:2], dtype=np.int16)
                
                for channel in channels:
                    result += (result_src[:, :, channel] != 0)

                result = result.astype(bool)

                if option == "NORMAL":
                    save_dir = result_dir
                elif option == "REMOVED_VEGETATION":
                    result = result & ~vegetation_mask
                    save_dir = join(
                        result_dir,
                        "removed_vegetation"
                    )
                
                if not exists(save_dir):
                    makedirs(save_dir)
    
                imwrite_with_error(
                    join(save_dir, name.replace(" & ", "_and_") + ".png"),
                    (result * 255).astype(np.uint8)
                )
                
                scores[name] = dict()
                
                for gt_type in ["GT_BOTH", "GT_ORANGE", "GT_RED"]:
                    ground_truth = None
                    
                    if gt_type == "GT_BOTH":
                        ground_truth = np.all(
                            (gt_src == C_RED) | (gt_src == C_ORANGE),
                            axis=2
                        )
                    elif gt_type == "GT_RED":
                        ground_truth = np.all(
                            gt_src == C_RED,
                            axis=2
                        )
                    elif gt_type == "GT_ORANGE":
                        ground_truth = np.all(
                            gt_src == C_ORANGE,
                            axis=2
                        )
                    
                    cm, metrics = evaluation_by_confusion_matrix(
                        result,
                        ground_truth
                    )
                    
                    scores[name][gt_type] = {
                        "Confusion Matrix": cm,
                        "Score"           : metrics
                    }
            
            
            json.dump(
                scores,
                open(join(save_dir, "scores.json"), "w"),
                ensure_ascii=False,
                sort_keys=True,
                indent="\t"
            )
Example 15
    def calc_edge_angle_variance(img, window_size=33, step=1, logger=None):
        """
        エッジ角度分散を計算

        - 入力画像に対し、エッジ抽出を行う
        - ウィンドウ処理を行い、局所領域内におけるエッジ角度の分散を求める

        Parameters
        ----------
        img : numpy.ndarray
            入力画像 (グレースケール画像)
        window_size : int, default 33
            ウィンドウ処理におけるウィンドウサイズ
        step : int, default 1
            ウィンドウ処理におけるずらし幅
        logger : ImageLogger, default None
            ImageLogger インスタンス

        Returns
        -------
        features : numpy.ndarray
            特徴量画像 (32-Bit float 画像)

        """

        # Check arguments
        NDARRAY_ASSERT(img, ndim=2)
        TYPE_ASSERT(window_size, int)
        TYPE_ASSERT(step, int)
        TYPE_ASSERT(logger, [None, ImageLogger])

        # Initialize variables
        edge_proc = EdgeProcedures(img)

        # Set parameters
        params = {"window_proc": {"window_size": window_size, "step": step}}

        # Calculate Edge Angle Variance in window
        features = edge_proc.get_feature_by_window(
            edge_proc.angle_variance_using_mean_vector,
            **params["window_proc"])

        # Scale feature image to the same size of input image
        features = zoom_to_img_size(
            # FIXED: Normalize
            features,
            img.shape)

        # Logging
        if isinstance(logger, ImageLogger):
            logger.logging_dict(params,
                                "params",
                                sub_path="edge_angle_variance")
            logger.logging_img(edge_proc.edge_magnitude,
                               "magnitude",
                               sub_path="edge_angle_variance")
            logger.logging_img(edge_proc.edge_angle,
                               "angle",
                               cmap="hsv",
                               sub_path="edge_angle_variance")
            logger.logging_img(edge_proc.get_angle_colorized_img(),
                               "angle_colorized",
                               sub_path="edge_angle_variance")
            logger.logging_img(features,
                               "angle_variance",
                               sub_path="edge_angle_variance")
            logger.logging_img(features,
                               "angle_variance",
                               cmap="jet",
                               sub_path="edge_angle_variance")

        return features
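angle_variance_using_mean_vector belongs to the project's EdgeProcedures class; judging by its name, it plausibly computes the circular variance 1 - R, where R is the length of the mean resultant vector of the edge directions. A sketch under that assumption, showing why such a measure separates aligned edges from scattered ones:

import numpy as np

def circular_variance(angles_deg):
    # Assumed definition: 1 - |mean of the unit vectors exp(i*theta)|
    theta = np.deg2rad(angles_deg)
    R = np.abs(np.mean(np.exp(1j * theta)))
    return 1.0 - R

print(circular_variance([90, 90, 90]))       # 0.0 -> edges all aligned
print(circular_variance([0, 90, 180, 270]))  # ~1.0 -> directions scattered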
Example 16
    def high_pass_filter(img, freq=None, window_size=33, step=1, logger=None):
        """
        ハイパスフィルタを適用する

        - 画像に対し、離散フーリエ変換を行う
        - 周波数領域上で、ハイパスフィルタを適用する
        - フーリエ逆変換により、高周波領域を抽出する
        - ウィンドウ処理を行い、局所領域内における画素値の平均値を求める

        Parameters
        ----------
        img : numpy.ndarray
            入力画像 (8-Bit グレースケール画像)
        freq : float, default None
            ハイパスフィルタの周波数
        window_size : int, default 33
            ウィンドウ処理におけるウィンドウサイズ
        step : int, default 1
            ウィンドウ処理におけるずらし幅
        logger : ImageLogger, default None
            ImageLogger インスタンス

        Returns
        -------
        features : numpy.ndarray
            特徴量画像 (32-Bit float 画像)

        See Also
        --------
        ハイパスフィルタ
            - 円盤状のマスク画像を適用することで実現する
        """

        NDARRAY_ASSERT(img, ndim=2, dtype=np.uint8)
        TYPE_ASSERT(freq, [None, float])
        TYPE_ASSERT(logger, [None, ImageLogger])

        freq = freq or int(min(img.shape[:2]) * 0.05)

        # Set parameters
        params = {
            "freq": freq,
            "window_proc": {
                "window_size": window_size,
                "step": step
            }
        }

        # fft: 2-D Fourier Matrix
        fft = np.fft.fftshift(np.fft.fft2(img))

        # mask: High-pass mask
        mask = disk_mask(freq, *img.shape[:2])

        # Apply `mask` to `fft`
        # Set the elements where `mask` is 1 to zero (0+0j)
        fft_masked = fft.copy()
        fft_masked[mask] = 0 + 0j

        # i_fft: invert FFT
        i_fft = np.fft.ifft2(fft_masked)

        # Calculate Mean of pixel values in window
        features = compute_by_window(np.abs(i_fft),
                                     BuildingDamageExtractor._f_mean,
                                     dst_dtype=np.float64,
                                     **params["window_proc"])

        # Scale feature image to the same size of input image
        features = zoom_to_img_size(
            # FIXED: Normalize
            features,
            img.shape)

        # Logging
        if isinstance(logger, ImageLogger):
            logger.logging_dict(params, "params", sub_path="high_pass_filter")
            logger.logging_img(np.log10(np.abs(fft)),
                               "power_spectrum",
                               cmap="jet",
                               sub_path="high_pass_filter")
            logger.logging_img(mask,
                               "mask",
                               cmap="gray_r",
                               sub_path="high_pass_filter")
            logger.logging_img(np.log10(np.abs(fft_masked)),
                               "power_spectrum_masked",
                               cmap="jet",
                               sub_path="high_pass_filter")
            logger.logging_img(np.abs(i_fft),
                               "IFFT",
                               sub_path="high_pass_filter")
            logger.logging_img(features,
                               "HPF_gray",
                               sub_path="high_pass_filter")
            logger.logging_img(features,
                               "HPF_colorized",
                               cmap="jet",
                               sub_path="high_pass_filter")

        return features
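disk_mask is a project helper; from its use here it presumably returns a boolean image that is True inside a centered disk of the given radius, so zeroing those frequencies removes the low-frequency content. A self-contained sketch under that assumption (the sketch applies ifftshift before the inverse transform; since only the magnitude is used afterwards, the shift affects phase only):

import numpy as np

def disk_mask_sketch(radius, h, w):
    # Assumed shape of the project's disk_mask(): True inside a centered disk
    yy, xx = np.ogrid[:h, :w]
    return (yy - h // 2) ** 2 + (xx - w // 2) ** 2 <= radius ** 2

img = np.random.default_rng(0).integers(0, 256, (64, 64)).astype(np.uint8)

fft = np.fft.fftshift(np.fft.fft2(img))
fft[disk_mask_sketch(8, *img.shape)] = 0 + 0j   # suppress low frequencies
high_pass = np.abs(np.fft.ifft2(np.fft.ifftshift(fft)))

print(high_pass.shape, high_pass.dtype)  # (64, 64) float64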
Example 17
    def find_subtracted_thresholds(self,
                                   img_a,
                                   img_b,
                                   ground_truth,
                                   precision=10):
        """
        2画像間の差分結果の閾値計算を行う

        - 画像A, B それぞれで閾値処理
        - 各画像の最小値、最大値の間をパーセンタイルで分割する
        - A & not(B) を計算し、正解データと比較
        - F値が最も高くなるときの画像A, B の閾値を返却する


        Parameters
        ----------
        img_a, img_b :  numpy.ndarray
            入力画像 (グレースケール画像)
        ground_truth : numpy.ndarray
            正解データ (1-Bit)
        precision : int
            閾値計算の精度

        Returns
        -------
        reasonable_params : dict
            F値が最も高くなったときの閾値
        result : numpy.ndarray
            その際の閾値処理結果画像 (1-Bit 2値画像)

        Notes
        -----
        `ground_truth`:
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害領域
        `precision`:
            - precision=N のとき、2N ずつにパーセンタイル分割を行う

        """
        NDARRAY_ASSERT(img_a, ndim=2)
        NDARRAY_ASSERT(img_b, ndim=2)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        SAME_SHAPE_ASSERT(img_a, img_b, ignore_ndim=False)
        SAME_SHAPE_ASSERT(img_a, ground_truth, ignore_ndim=False)

        # Initialize variables
        reasonable_params = {
            "Score": {
                "F Score": -1
            },
            "Confusion Matrix": None,
            "Range": None,
        }
        result = None

        # Calculate thresholds
        for q_a_low, q_a_high, q_b_low, q_b_high in tqdm(
                list(
                    product(np.linspace(50 / precision, 50, precision),
                            repeat=4))):

            # Generate result
            a_min, a_max = self._in_range_percentile(img_a,
                                                     (q_a_low, q_a_high))
            b_min, b_max = self._in_range_percentile(img_b,
                                                     (q_b_low, q_b_high))

            _result_a = (a_min < img_a) & (img_a < a_max)
            _result_b = (b_min < img_b) & (img_b < b_max)

            _result = _result_a & np.bitwise_not(_result_b)

            # Calculate scores
            _cm, _metrics = evaluation_by_confusion_matrix(
                _result, ground_truth)

            # Update reasonable_params
            if _metrics["F Score"] > reasonable_params["Score"]["F Score"]:
                reasonable_params = {
                    "Score": _metrics,
                    "Confusion Matrix": _cm,
                    "Range": {
                        "img_a": [a_min, a_max],
                        "img_b": [b_min, b_max],
                    }
                }
                result = _result.copy()

        # Logging
        if self.logger:
            self.logger.logging_dict(reasonable_params,
                                     "params_subtracted_thresholds",
                                     sub_path=self.logger_sub_path)
            self.logger.logging_img(result,
                                    "subtract_thresholded",
                                    sub_path=self.logger_sub_path)

        return reasonable_params, result
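_in_range_percentile is defined elsewhere in the class; from the way (q_low, q_high) pairs drawn from (0, 50] are used, a plausible reading is a two-sided percentile cut that drops the lowest q_low % and the highest q_high % of the values. A sketch under that assumption:

import numpy as np

def in_range_percentile_sketch(values, q):
    # Assumed behavior of _in_range_percentile(): two-sided percentile cut
    q_low, q_high = q
    return np.percentile(values, q_low), np.percentile(values, 100 - q_high)

values = np.arange(1000)
v_min, v_max = in_range_percentile_sketch(values, (5, 5))

mask = (v_min < values) & (values < v_max)
print(v_min, v_max, mask.mean())  # 49.95 949.05 0.9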
Example 18
    def meanshift_and_color_thresholding(
            self,
            func_mean_shift=cv2.pyrMeanShiftFiltering,
            params_mean_shift={
                "sp": 40,
                "sr": 50
            },
            retval_pos=None,
            skip_find_params=False):
        """
        建物被害検出: Mean-Shift による減色処理と色閾値処理

        - 入力画像に Mean-Shift による減色処理を適用する
        - 正解データをもとに、色空間での閾値を探索し、適用する
        - 閾値処理結果に対し、モルフォロジー処理による補正処理を行う

        Parameters
        ----------
        func_mean_shift : callable object
            Mean-Shift 処理関数
        params_mean_shift : dict
            Mean-Shift 処理関数に渡されるパラメータ
        retval_pos : int, default None
            Mean-Shift 処理関数の返却値が複数の場合、
            領域分割後の画像が格納されている位置を指定する

        Returns
        -------
        building_damage : numpy.ndarray
            被害抽出結果

        Notes
        -----
        `func_mean_shift`
            - 第1引数に画像を取れるようになっている必要がある
        `building_damage`
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害抽出結果
        """

        img = self.img
        ground_truth = self.ground_truth
        logger = self.logger

        # Check arguments
        NDARRAY_ASSERT(img, ndim=3, dtype=np.uint8)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        SAME_SHAPE_ASSERT(img, ground_truth, ignore_ndim=True)

        TYPE_ASSERT(params_mean_shift, dict)

        if isinstance(func_mean_shift, str):
            func_mean_shift = eval(func_mean_shift)

        assert callable(func_mean_shift
                        ), "argument 'func_mean_shift' must be callable object"

        # Set parameters
        params = {
            "Mean-Shift": {
                "func":
                get_qualified_class_name(func_mean_shift,
                                         wrap_with_quotes=False),
                "params":
                params_mean_shift
            }
        }

        eprint("Pre-processing ({func_name}, {params}) ... ".format(
            func_name=params["Mean-Shift"]["func"],
            params=", ".join([
                f"{k}={v}" for k, v in params["Mean-Shift"]["params"].items()
            ])),
               end="")

        # Mean-Shift
        if retval_pos is None:
            smoothed = func_mean_shift(img, **params_mean_shift)
        else:
            smoothed = func_mean_shift(img, **params_mean_shift)[retval_pos]

        eprint("done !")

        # Logging
        if logger:
            logger.logging_img(smoothed, "filtered")
            logger.logging_dict(params, "detail_mean_shift")

        if not skip_find_params:
            params_finder = ParamsFinder(logger=logger)

            # Find: Color thresholds in HSV
            _, result = params_finder.find_color_threshold_in_hsv(
                img=smoothed,
                ground_truth=ground_truth,
            )

            # Find: Morphology processing
            _, building_damage = params_finder.find_reasonable_morphology(
                result_img=result,
                ground_truth=ground_truth,
            )

            # Logging
            if logger:
                logger.logging_img(building_damage, "building_damage")

            return building_damage

        return smoothed
Example 19
    def find_canny_thresholds(self, img, ground_truth):
        """
        Canny のアルゴリズムの閾値探索を行う

        Parameters
        ----------
        img : numpy.ndarray
            入力画像 (8−Bit グレースケール画像)
        ground_truth
            正解データ (1-Bit 画像)

        Returns
        -------
        reasonable_params : dict
            F値が最も高くなったときの閾値
        result : numpy.ndarray
            その際の閾値処理結果画像

        Notes
        -----
        `ground_truth`:
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害領域
        """
        from skimage.feature import canny

        NDARRAY_ASSERT(img, ndim=2, dtype=np.uint8)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        SAME_SHAPE_ASSERT(img, ground_truth, ignore_ndim=False)

        # Initialize variables
        reasonable_params = {
            "Score": {
                "F Score": -1
            },
            "Confusion Matrix": None,
            "Thresholds": None,
        }
        result = None

        # Calculate thresholds
        for th_1, th_2 in tqdm(list(product(range(256), repeat=2))):

            # Generate result
            _result = canny(img, low_threshold=th_1, high_threshold=th_2)

            # Calculate scores
            _cm, _metrics = evaluation_by_confusion_matrix(
                _result, ground_truth)

            # Update reasonable_params
            if _metrics["F Score"] > reasonable_params["Score"]["F Score"]:
                reasonable_params = {
                    "Score": _metrics,
                    "Confusion Matrix": _cm,
                    "Thresholds": list([th_1, th_2])
                }
                result = _result.copy()

        if result.dtype != bool:
            result = (result * 255).astype(np.uint8)

        # Logging
        if self.logger:
            self.logger.logging_dict(reasonable_params,
                                     "canny_thresholds",
                                     sub_path=self.logger_sub_path)
            self.logger.logging_img(result,
                                    "canny",
                                    sub_path=self.logger_sub_path)

        return reasonable_params, result
Example 20
    def find_reasonable_morphology(self, result_img, ground_truth):
        """
        最適なモルフォロジー処理を模索

        - 正解データとの精度比較により、各種処理結果の補正として
          最適なモルフォロジー処理を模索する


        Parameters
        ----------
        result_img : numpy.ndarray
            処理結果画像
        ground_truth : numpy.ndarray
            正解データ

        Returns
        -------
        reasonable_params : dict
            導き出されたパラメータ
            - モルフォロジー処理のパターン
            - 適用時のスコア
        result : numpy.ndarray
            - モルフォロジー処理結果画像

        Notes
        -----
        `result_img`:
            - 1-Bit (bool 型) 2値画像
            - 黒:無被害、白:被害

        `ground_truth`:
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害領域
        """

        # Check arguments
        NDARRAY_ASSERT(result_img, ndim=2, dtype=bool)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        SAME_SHAPE_ASSERT(result_img, ground_truth, ignore_ndim=True)

        # Combination patterns to search over
        FIND_RANGE = {
            # Kernel size: 3x3, 5x5
            "Kernel Size": [3, 5],
            # Neighborhood: 4-connected, 8-connected
            "# of Neighbors": [4, 8],
            # Morphology operation
            "Morphology Methods": ["ERODE", "DILATE", "OPEN", "CLOSE"],
            # Number of iterations
            "# of Iterations": range(1, 6)
        }

        # Convert input image to uint8 (for `cv2.morphologyEx`)
        result_img = (result_img * 255).astype(np.uint8)

        # Initialize variables
        reasonable_params = {
            "Confusion Matrix": dict(),
            "Score": {
                "F Score": -1,
            },
            "Params": {
                "Operation": "",
                "Kernel": {
                    "Size": (-1, -1),
                    "#_of_Neighbor": -1
                },
                "Iterations": -1
            }
        }
        result = None

        # Finding reasonable process
        for kernel_size in FIND_RANGE["Kernel Size"]:
            for n_neighbor in FIND_RANGE["# of Neighbors"]:
                for operation in FIND_RANGE["Morphology Methods"]:
                    for n_iterations in FIND_RANGE["# of Iterations"]:

                        # Set parameters
                        if n_neighbor == 4:
                            kernel = np.zeros((kernel_size, kernel_size),
                                              dtype=np.uint8)
                            kernel[kernel_size // 2, :] = 1
                            kernel[:, kernel_size // 2] = 1
                        else:
                            kernel = np.ones((kernel_size, kernel_size),
                                             dtype=np.uint8)

                        # Generate result
                        _result = cv2.morphologyEx(
                            src=result_img,
                            op=cv2.__dict__[f"MORPH_{operation}"],
                            kernel=kernel,
                            iterations=n_iterations).astype(bool)

                        # Calculate scores
                        _cm, _metrics = evaluation_by_confusion_matrix(
                            _result, ground_truth)

                        # Update reasonable_params
                        if _metrics["F Score"] > reasonable_params["Score"][
                                "F Score"]:

                            reasonable_params = {
                                "Confusion Matrix": _cm,
                                "Score": _metrics,
                                "Params": {
                                    "Operation": operation,
                                    "Kernel": {
                                        "Size": (kernel_size, kernel_size),
                                        "#_of_Neighbor": n_neighbor
                                    },
                                    "Iterations": n_iterations
                                }
                            }

                            result = _result.copy()

        # Logging
        if self.logger:
            self.logger.logging_img(result,
                                    "result_morphology",
                                    sub_path=self.logger_sub_path)
            self.logger.logging_dict(reasonable_params,
                                     "params_morphology",
                                     sub_path=self.logger_sub_path)

        return reasonable_params, result
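The hand-built 4-neighbor kernel in the search above is exactly OpenCV's cross-shaped structuring element, so the same kernels could also be obtained from cv2.getStructuringElement. A quick check:

import cv2
import numpy as np

ksize = 5
kernel = np.zeros((ksize, ksize), dtype=np.uint8)
kernel[ksize // 2, :] = 1
kernel[:, ksize // 2] = 1

print(np.array_equal(
    kernel, cv2.getStructuringElement(cv2.MORPH_CROSS, (ksize, ksize))))
# True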
Example 21
def detect_road_damage_2(result, road_mask, vegetation_mask=None, logger=None):
    """
    建物被害の結果から道路上の被害抽出を行う
    
    Parameters
    ----------
    result : numpy.ndarray
        建物被害の結果
        黒を背景、白を被害領域として
        2値化されている必要がある
    
    road_mask : numpy.ndarray
        道路マスク画像
        黒を背景、白を道路領域として
        2値化されている必要がある
    
    logger : ImageLogger, default is None
        処理途中の画像をロギングする ImageLogger
    
    Returns
    -------
    numpy.ndarray
        道路上の被害抽出結果
        
    Notes
    -----
    - `result` と `road_mask` は同じ大きさである必要がある
    """
    NDARRAY_ASSERT(result, ndim=2, dtype=bool)
    NDARRAY_ASSERT(road_mask, ndim=2, dtype=bool)
    SAME_SHAPE_ASSERT(result, road_mask)
    TYPE_ASSERT(vegetation_mask, [None, np.ndarray])

    if vegetation_mask is not None:
        NDARRAY_ASSERT(vegetation_mask, ndim=2, dtype=bool)
        result = result & ~vegetation_mask

    result = (result * 255).astype(np.uint8)

    dist = cv2.distanceTransform(result, cv2.DIST_L2, maskSize=5)
    # FIXED: Normalize
    # dist = (dist / dist.max() * 255).astype(np.uint8)

    logger_sub_path = "" if vegetation_mask is None else "removed_vegetation"
    if logger:
        logger.logging_img(dist,
                           "distance",
                           cmap="gray",
                           sub_path=logger_sub_path)
        logger.logging_img(dist,
                           "distance_visualized",
                           cmap="jet",
                           sub_path=logger_sub_path)

    result_extracted = dist * road_mask

    if logger:
        logger.logging_img(result_extracted,
                           "result_extracted",
                           sub_path=logger_sub_path)

    return result_extracted
Example 22
    def find_threshold(self,
                       img,
                       ground_truth,
                       precision=100,
                       logger_suffix=""):
        """
        正解データを用いて最適な閾値を求める

        - 各画像の最小値、最大値の間をパーセンタイルで分割し、閾値を生成
        - 閾値処理を行った結果を正解データと比較する
        - F値が最も高くなるときの閾値を返却する

        Parameters
        ----------
        img :  numpy.ndarray
            入力画像 (グレースケール画像)
        ground_truth : numpy.ndarray
            正解データ (1-Bit)
        precision : int, default 100
            閾値計算の精度
        logger_suffix : str, default ""
            ImageLogger によるロギングの際に画像が格納される
            フォルダの末尾につく文字列を指定する

        Returns
        -------
        reasonable_params : dict
            F値が最も高くなったときの閾値
        result : numpy.ndarray
            その際の閾値処理結果画像 (1-Bit 2値画像)

        Notes
        -----
        `ground_truth`:
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害領域
        `precision`:
            - precision=N のとき、2N ずつにパーセンタイル分割を行う

        """

        NDARRAY_ASSERT(img, ndim=2)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        TYPE_ASSERT(logger_suffix, str, allow_empty=True)

        reasonable_params = {
            "Score": {
                "F Score": -1
            },
            "Confusion Matrix": None,
            "Range": None,
        }
        result = None

        for q_low, q_high in tqdm(
                list(
                    product(np.linspace(50 / precision, 50, precision),
                            repeat=2))):

            v_min, v_max = self._in_range_percentile(img, (q_low, q_high))

            _result = (v_min < img) & (img < v_max)

            _cm, _metrics = evaluation_by_confusion_matrix(
                _result, ground_truth)

            if _metrics["F Score"] > reasonable_params["Score"]["F Score"]:
                reasonable_params = {
                    "Score": _metrics,
                    "Confusion Matrix": _cm,
                    "Range": [v_min, v_max]
                }
                result = _result.copy()

        if result.dtype != bool:
            result = (result * 255).astype(np.uint8)

        if self.logger:
            self.logger.logging_dict(
                reasonable_params,
                f"params",
                sub_path="_".join(
                    [x for x in [self.logger_sub_path, logger_suffix] if x]))
            self.logger.logging_img(
                result,
                f"result_thresholded",
                sub_path="_".join(
                    [x for x in [self.logger_sub_path, logger_suffix] if x]))

        return reasonable_params, result
Example 23
    def find_color_threshold_in_hsv(self, img, ground_truth, precision=10):
        """
        HSV 色空間における色閾値探索

        - RGB → HSV 変換 を行う

        - HSV の各チャンネルで閾値処理を行い統合する

        - 正解データを用いて精度評価を行う


        Parameters
        ----------
        img : numpy.ndarray
            入力画像 (8-Bit RGB カラー)
        ground_truth : numpy.ndarray
            正解データ (1-Bit)
        precision : int
            閾値計算の精度


        Returns
        -------
        reasonable_params : dict
            F値が最も高くなったときの閾値
        result : numpy.ndarray
            その際の閾値処理結果画像 (1-Bit 2値画像)

        Notes
        -----
        `ground_truth`:
            - 1-Bit (bool 型) 2値画像
            - 黒:背景、白:被害領域
        `precision`:
            - precision=N のとき、H, S, V の各チャンネルに対し
              2N ずつにパーセンタイル分割を行う
        """

        global _worker_find_color_threshold_in_hsv

        # Worker methods executed parallel
        @worker_exception_raisable
        def _worker_find_color_threshold_in_hsv(_img, _masked, _q_h, _q_s):
            # Value used in tqdm
            _worker_id = current_process()._identity[0]
            _desc = f"Worker #{_worker_id:3d}"

            # Unpack arguments
            _q_h_low, _q_h_high = _q_h
            _q_s_low, _q_s_high = _q_s

            # Split the image into its channels
            _img_h, _img_s, _img_v = [_img[:, :, i] for i in range(3)]
            _masked_h, _masked_s, _masked_v = [_masked[:, i] for i in range(3)]

            # Initialize variables
            reasonable_params = {
                "Score": {
                    "F Score": -1,
                },
                "Range": -1
            }

            # Find thresholds
            for _q_v_low, _q_v_high in tqdm(list(
                    product(np.linspace(50 / precision, 50, precision),
                            repeat=2)),
                                            desc=_desc,
                                            position=_worker_id,
                                            leave=False):

                # Generate result
                _h_min, _h_max = self._in_range_percentile(
                    _masked_h, (_q_h_low, _q_h_high))
                _s_min, _s_max = self._in_range_percentile(
                    _masked_s, (_q_s_low, _q_s_high))
                _v_min, _v_max = self._in_range_percentile(
                    _masked_v, (_q_v_low, _q_v_high))

                _result = (((_h_min <= _img_h) & (_img_h <= _h_max)) &
                           ((_s_min <= _img_s) & (_img_s <= _s_max)) &
                           ((_v_min <= _img_v) & (_img_v <= _v_max)))

                # Calculate score
                _cm, _metrics = evaluation_by_confusion_matrix(
                    _result, ground_truth)

                # Update reasonable_params
                if _metrics["F Score"] > reasonable_params["Score"]["F Score"]:
                    reasonable_params = {
                        "Score": _metrics,
                        "Confusion Matrix": _cm,
                        "Range": {
                            "H": (_h_min, _h_max, _q_h_low, _q_h_high),
                            "S": (_s_min, _s_max, _q_s_low, _q_s_high),
                            "V": (_v_min, _v_max, _q_v_low, _q_v_high),
                        }
                    }

            return reasonable_params

        # Check arguments
        NDARRAY_ASSERT(img, ndim=3, dtype=np.uint8)
        NDARRAY_ASSERT(ground_truth, ndim=2, dtype=bool)
        SAME_SHAPE_ASSERT(img, ground_truth, ignore_ndim=True)

        # Convert BGR -> HSV
        img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # `masked`: `img` masked by `ground_truth`
        masked = img[ground_truth]

        # Percentile Split
        Q = list(product(np.linspace(50 / precision, 50, precision), repeat=4))

        # `progress_bar`: whole progress bar
        progress_bar = tqdm(total=len(Q), position=0)

        def _update_progressbar(arg):
            progress_bar.update()

        # Initialize process pool
        cp = CustomPool()
        pool = cp.Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(), ))

        results = list()

        # Multi-Processing !
        for q_h_low, q_h_high, q_s_low, q_s_high in Q:
            results.append(
                pool.apply_async(_worker_find_color_threshold_in_hsv,
                                 args=(img, masked, (q_h_low, q_h_high),
                                       (q_s_low, q_s_high)),
                                 callback=_update_progressbar))
        pool.close()
        pool.join()
        cp.update()

        # Resolve results
        try:
            results = [result.get() for result in results]
        except Exception as e:
            print(e)

        # Get result whose F-Score is max in results
        reasonable_params = max(results, key=lambda e: e["Score"]["F Score"])

        img_h, img_s, img_v = [img[:, :, i] for i in range(3)]
        h_min, h_max, _, _ = reasonable_params["Range"]["H"]
        s_min, s_max, _, _ = reasonable_params["Range"]["S"]
        v_min, v_max, _, _ = reasonable_params["Range"]["V"]

        # Generate image using reasonable thresholds
        result = (((h_min <= img_h) & (img_h <= h_max)) &
                  ((s_min <= img_s) & (img_s <= s_max)) & ((v_min <= img_v) &
                                                           (img_v <= v_max)))

        # Logging
        if self.logger:
            self.logger.logging_dict(reasonable_params,
                                     "color_thresholds_in_hsv",
                                     sub_path=self.logger_sub_path)
            self.logger.logging_img(result,
                                    "meanshift_thresholded",
                                    sub_path=self.logger_sub_path)

        return reasonable_params, result