Code Example #1
def _make_csf_color_image(width=640,
                          height=480,
                          lv1=None,
                          lv2=None,
                          stripe_num=18):
    """
    Create a CSF-like pattern by stacking multiple offset rectangles.
    Input signal levels are limited to 10 bit.

    Parameters
    ----------
    width : numeric
        width of the pattern image.
    height : numeric
        height of the pattern image.
    lv1 : array_like
        video level 1 as (R, G, B). each value must be 10 bit.
    lv2 : array_like
        video level 2 as (R, G, B). each value must be 10 bit.
    stripe_num : numeric
        number of stripes.

    Returns
    -------
    array_like
        a csf pattern image.
    """
    width_list = cmn.equal_devision(width, stripe_num)
    height_list = cmn.equal_devision(height, stripe_num)
    h_pos_list = cmn.equal_devision(width // 2, stripe_num)
    v_pos_list = cmn.equal_devision(height // 2, stripe_num)
    lv1_16bit = lv1 * (2**6)
    lv2_16bit = lv2 * (2**6)
    img = np.zeros((height, width, 3), dtype=np.uint16)

    width_temp = width
    height_temp = height
    h_pos_temp = 0
    v_pos_temp = 0
    for idx in range(stripe_num):
        lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
        temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
        temp_img[:, :, 0] *= lv[0]
        temp_img[:, :, 1] *= lv[1]
        temp_img[:, :, 2] *= lv[2]
        ed_pos_h = h_pos_temp + width_temp
        ed_pos_v = v_pos_temp + height_temp
        img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
        width_temp -= width_list[stripe_num - 1 - idx]
        height_temp -= height_list[stripe_num - 1 - idx]
        h_pos_temp += h_pos_list[idx]
        v_pos_temp += v_pos_list[idx]

    return img
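A hypothetical call (assuming numpy as np and the project's cmn module are imported alongside the function above). Because lv1 and lv2 are indexed per channel inside the loop, length-3 (R, G, B) arrays of 10 bit values are passed here:

lv_high = np.array([940, 940, 940], dtype=np.uint16)  # 10bit "white"
lv_low = np.array([64, 64, 64], dtype=np.uint16)      # 10bit "black"
csf_img = _make_csf_color_image(width=640, height=480,
                                lv1=lv_high, lv2=lv_low,
                                stripe_num=18)
# csf_img is a (480, 640, 3) np.uint16 image; the 10bit levels are
# shifted up to 16bit inside the function (multiplied by 2**6).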
Code Example #2
def make_multi_crosshatch(width=1920, height=1080,
                          h_block=4, v_block=2,
                          fragment_width=64, fragment_height=64,
                          linewidth=1, linetype=cv2.LINE_AA,
                          bg_color_array=const_gray_array_lower,
                          fg_color_array=const_white_array,
                          angle=30, debug=False):
    """
    # Overview
    Packs multiple crosshatch patterns into a single image.
    The patterns are specified as lists in bg_color_array and fg_color_array.

    # Notes
    The first dimension of bg_color_array and fg_color_array must match
    h_block * v_block; otherwise the parameter check at the top of the
    function raises an exception.
    """
    # parameter check
    # -----------------------
    if bg_color_array.shape[0] != (h_block * v_block):
        raise TypeError("bg_color_array.shape is invalid.")
    if fg_color_array.shape[0] != (h_block * v_block):
        raise TypeError("fg_color_array.shape is invalid.")

    # block_width = width // h_block
    # block_height = height // v_block
    block_width_array = common.equal_devision(width, h_block)
    block_height_array = common.equal_devision(height, v_block)

    v_img_list = []
    for v_idx in range(v_block):
        h_img_list = []
        block_height = block_height_array[v_idx] 
        for h_idx in range(h_block):
            block_width = block_width_array[h_idx]
            idx = (v_idx * h_block) + h_idx
            img = make_crosshatch(width=block_width, height=block_height,
                                  linewidth=linewidth, linetype=linetype,
                                  fragment_width=fragment_width,
                                  fragment_height=fragment_height,
                                  bg_color=bg_color_array[idx],
                                  fg_color=fg_color_array[idx],
                                  angle=angle)
            h_img_list.append(img)

        v_img_list.append(cv2.hconcat(h_img_list))
    img = cv2.vconcat(v_img_list)

    if debug:
        preview_image(img, 'rgb')

    return img
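A hypothetical call for the default 4 x 2 grid (a sketch only: the const_* defaults and the value range expected by make_crosshatch are not shown in this excerpt, so the color values below are assumptions; numpy, cv2, common and make_crosshatch from the same module are assumed to be available):

import numpy as np

# one color per block; 4 * 2 = 8 rows are required by the shape check
bg_color_array = np.array([[32, 32, 32]] * 8)
fg_color_array = np.array([[255, 255, 255]] * 8)
img = make_multi_crosshatch(width=1920, height=1080,
                            h_block=4, v_block=2,
                            bg_color_array=bg_color_array,
                            fg_color_array=fg_color_array,
                            angle=30)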
Code Example #3
def gen_csf_pattern(width=640, height=480, bar_num=17,
                    a=(32768, 32768, 0), b=(32768, 0, 32768),
                    dtype=np.uint16, debug=False):
    """
    # Overview
    Create a CSF (Contrast Sensitivity Function) pattern.
    """
    lut = [a, b]
    bar_length_list = common.equal_devision(width, bar_num)
    line_bar_list = []
    for bar_idx, length in enumerate(bar_length_list):
        # toggle between the two LUT values to build a 1D bar
        # -----------------------------------
        bar = [np.ones((length), dtype=dtype) * lut[bar_idx % 2][color]
               for color in range(3)]
        bar = np.dstack(bar)
        line_bar_list.append(bar)

    # concatenate the bars made by toggling between a and b
    # -----------------------------------
    line = np.hstack(line_bar_list)

    # broadcast in the V direction to turn the 1-line image into a 2D image
    # --------------------------------------------
    img = line * np.ones((height, 1, 3))

    if debug:
        preview_image(img, 'rgb')

    return img
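A hypothetical call (assuming numpy as np and the project's common module are imported alongside the function above). Note that multiplying the uint16 line by np.ones() promotes the result to float64, so cast back if an integer image is needed:

csf = gen_csf_pattern(width=640, height=480, bar_num=17,
                      a=(32768, 32768, 0), b=(32768, 0, 32768),
                      dtype=np.uint16)
csf_u16 = np.uint16(np.round(csf))  # csf itself is float64 after the broadcast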
Code Example #4
def _make_block(width=640, height=480, level=[[900, 0, 0], [0, 800, 0]]):
    """
    Generate an image of rectangles filled with the colors specified
    in `level`, stacked in the vertical (V) direction.

    Parameters
    ----------
    width : numeric
        total width
    height : numeric
        total height
    level : array_like
        list of (red, green, blue) values, one entry per block.
        the bit depth must be 10 bit.

    Returns
    -------
    array_like
        block image.

    """
    block_num = len(level)
    height_p = cmn.equal_devision(height, block_num)

    img_list = []
    for idx in range(len(level)):
        img = np.ones((height_p[idx], width, 3), dtype=np.uint16)
        img[:, :, 0] = level[idx][0]
        img[:, :, 1] = level[idx][1]
        img[:, :, 2] = level[idx][2]
        img_list.append(img)
    img = cv2.vconcat(img_list) * (2**6)

    return img
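A hypothetical call (assuming numpy as np, cv2 and the project's cmn module are imported alongside the function above):

block_img = _make_block(width=640, height=480,
                        level=[[900, 0, 0], [0, 800, 0], [0, 0, 700]])
# block_img stacks three 160px-high color blocks; the 10bit levels are
# scaled up to 16bit by the final "* (2**6)".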
Code Example #5
File: gamut_test_pattern.py  Project: toru-ver4/sip
def composite_gamut_csf_pattern(base_img, patch_rgb, patch_num):
    img_width = base_img.shape[1]
    img_height = base_img.shape[0]

    width = int(img_width * GAMUT_PATTERN_AREA_WIDTH)
    height = img_height

    img = np.zeros((height, width, 3), dtype=np.uint16)

    h_num = patch_num
    v_num = 6  # RGBMYC

    left_space = int(img_width * GAMUT_LEFT_RIGHT_SPACE)
    top_space = int(img_height * GAMUT_TOP_BOTTOM_SPACE)

    patch_width = int(img_width * GAMUT_PATCH_SIZE)
    patch_height = patch_width

    ws_target_len = width - (2 * left_space) - patch_width * h_num
    width_space = cmn.equal_devision(ws_target_len, h_num - 1)
    width_space.insert(0, 0)

    hs_target_len = height - (2 * top_space) - patch_height * v_num
    height_space = cmn.equal_devision(hs_target_len, v_num - 1)
    height_space.insert(0, 0)

    v_ed = top_space
    for v_idx in range(v_num):
        v_st = v_ed + height_space[v_idx]
        v_ed = v_st + patch_height
        h_ed = left_space
        for h_idx in range(h_num):
            lv1 = patch_rgb[v_idx * h_num + h_num - 1] * 0xFFC0
            lv2 = patch_rgb[v_idx * h_num + h_idx] * 0xFFC0
            patch = tpg.get_csf_color_image(width=patch_width,
                                            height=patch_height,
                                            lv1=np.uint16(np.round(lv1)),
                                            lv2=np.uint16(np.round(lv2)),
                                            stripe_num=GAMUT_PATCH_STRIPE_NUM)
            h_st = h_ed + width_space[h_idx]
            h_ed = h_st + patch_width
            # print(v_st, v_ed, h_st, h_ed)
            # print(img[v_st:v_ed, h_st:h_ed, :].shape)
            img[v_st:v_ed, h_st:h_ed, :] = patch

    base_img[0:height, 0:width, :] = img
Code Example #6
    def test_equal_division(self):
        # normal pattern
        # -----------------
        result_no1 = [1] * 100
        eq_(common.equal_devision(100, 100), result_no1)
        result_no2 = [1] * 99 + [2]
        eq_(common.equal_devision(101, 100), result_no2)

        # minimum
        # ----------------
        result_no3 = [1]
        eq_(common.equal_devision(1, 1), result_no3)

        # abnormal
        # ---------------
        result_no4 = [0]
        eq_(common.equal_devision(0, 1), result_no4)
        result_no5 = [0] * 3
        eq_(common.equal_devision(0, 3), result_no5)
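From these tests, common.equal_devision(total, div_num) appears to split total into div_num integers that sum to total, pushing any remainder toward the tail. A standalone sketch of that behavior (my reconstruction from the tests above, not the project's actual implementation; how a remainder larger than 1 is distributed may differ):

def equal_division_sketch(total, div_num):
    """Split `total` into `div_num` integers whose sum is `total`."""
    base = total // div_num
    rest = total - base * div_num
    result = [base] * div_num
    for i in range(rest):
        result[-(i + 1)] += 1  # hand the remainder out from the tail
    return result

assert equal_division_sketch(100, 100) == [1] * 100
assert equal_division_sketch(101, 100) == [1] * 99 + [2]
assert equal_division_sketch(0, 3) == [0] * 3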
Code Example #7
def composite_rgbmyc_color_bar(img):
    """
    Append an RGBMYC color bar at the bottom of the screen.

    Parameters
    ----------
    img : array_like
        image data. shape must be (V_num, H_num, 3).

    """
    img_width = img.shape[1]
    img_height = img.shape[0]

    scale_step = 65
    color_list = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 0, 1), (1, 1, 0),
                  (0, 1, 1)]

    width = _get_center_grad_width(img) // 2
    height = int(img_height * H_COLOR_GRADATION_HEIGHT)

    # build the color bar
    # ----------------------
    bar_height_list = cmn.equal_devision(height, 6)
    bar_img_list = []
    for color, bar_height in zip(color_list, bar_height_list):
        color_bar = tpg.gen_step_gradation(width=width,
                                           height=bar_height,
                                           step_num=scale_step,
                                           bit_depth=10,
                                           color=color,
                                           direction='h')
        bar_img_list.append(color_bar)
    color_bar = np.vstack(bar_img_list)

    h_st = _get_center_obj_h_start(img)
    h_ed = h_st + width
    v_st = img_height - 1 - height
    v_ed = v_st + height
    img[v_st:v_ed, h_st:h_ed] = color_bar

    # markers and text
    # ----------------------------------------
    marker_vertex = (h_st, v_st - 1)
    _make_marker(img, marker_vertex, direction='down')
    marker_vertex = (h_ed - 1, v_st - 1)
    _make_marker(img, marker_vertex, direction='down')
    text_pos_h = (h_st + int(img_width * MARKER_TEXT_PADDING_H))
    text_height, font_size = _get_text_height_and_font_size(img_height)
    text_pos_v = v_st - text_height
    text = "RGBMYC Scale. Video Level ⇒ 0, 16, 32, 48, ..., 992, 1008, 1023"
    _add_text_info(img,
                   st_pos=(text_pos_h, text_pos_v),
                   font_size=font_size,
                   text=text,
                   font_color=(0.4, 0.4, 0.4))
Code Example #8
def gen_hlg_gray_scale(img, width, height):
    rate = height / 2160
    scale_width = int(96 * rate)
    scale_height = height - 2  # "-2" is for pixels of frame.
    scale_step = 65
    bit_depth = 10
    scale_color = (1.0, 1.0, 1.0)
    text_offset_h = int((118) * (rate ** 0.6))
    text_offset_v = int(26 * rate)
    text_scale = 0.5 * (rate ** 0.6)
    text_d_offset = int(532 * (rate ** 0.7))

    # place the gray scale
    # --------------------------
    scale = gen_step_gradation(width=scale_width, height=scale_height,
                               step_num=scale_step, color=scale_color,
                               direction='v', bit_depth=bit_depth)
    v_b = 0 + 1
    v_e = height - 1
    h_b = width - scale_width - 1
    h_e = width - scale_width + scale_width - 1
    img[v_b:v_e, h_b:h_e] = scale

    # add text information
    # --------------------------
    font = cv2.FONT_HERSHEY_DUPLEX
    font_color = (0x8000, 0x8000, 0x0000)

    len_list = common.equal_devision(scale_height, scale_step)
    v_st = 0
    val_list = np.linspace(0, 2**bit_depth, scale_step)
    val_list[-1] -= 1
    luminance = get_bt2100_hlg_curve(val_list / ((2**bit_depth)-1)) * 1000

    for idx, x in enumerate(len_list):
        pos = (width - scale_width - text_offset_h, text_offset_v + v_st)
        v_st += x
        if luminance[idx] < 999.99999:
            text = "{:>4.0f}, {:>5.1f}".format(val_list[idx], luminance[idx])
        else:
            text = "{:>4.0f}, {:>4.0f}".format(val_list[idx], luminance[idx])
        cv2.putText(img, text, pos, font, text_scale, font_color)

    # add explanatory marker & text
    # -------------------------------
    pos = (width - text_d_offset, text_offset_v)
    text = "VideoLevel, Brightness for HLG. -->"
    cv2.putText(img, text, pos, font, text_scale, font_color)
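The helper get_bt2100_hlg_curve() is not shown in this excerpt. As an illustration of what the luminance column is doing, here is the BT.2100 HLG inverse OETF (non-linear signal to scene linear), scaled so that full scale corresponds to the 1000 cd/m2 nominal peak; the real helper may instead apply the full HLG EOTF including the OOTF:

import numpy as np

def hlg_inverse_oetf_sketch(x):
    """BT.2100 HLG inverse OETF: signal in [0, 1] -> scene linear in [0, 1]."""
    a = 0.17883277
    b = 1.0 - 4.0 * a
    c = 0.5 - a * np.log(4.0 * a)
    x = np.asarray(x, dtype=np.float64)
    return np.where(x <= 0.5, (x ** 2) / 3.0,
                    (np.exp((x - c) / a) + b) / 12.0)

# mirrors the luminance column computed above (bit_depth = 10)
val_list = np.linspace(0, 1024, 65)
val_list[-1] -= 1
luminance = hlg_inverse_oetf_sketch(val_list / 1023) * 1000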
Code Example #9
def gen_ST2084_gray_scale(img, width, height):
    rate = height / 2160
    scale_width = int(96 * rate)
    scale_height = height - 2  # "-2" is for pixels of frame.
    scale_step = 65
    bit_depth = 10
    scale_color = (1.0, 1.0, 1.0)
    text_offset_h = int(12 * rate)
    text_offset_v = int(26 * rate)
    text_scale = 0.5 * (rate ** 0.6)
    text_d_offset = int(128 * (rate ** 0.6))

    # place the gray scale
    # --------------------------
    scale = gen_step_gradation(width=scale_width, height=scale_height,
                               step_num=scale_step, color=scale_color,
                               direction='v', bit_depth=bit_depth)
    img[0+1:height-1, 0+1:scale_width+1] = scale

    # add text information
    # --------------------------
    font = cv2.FONT_HERSHEY_DUPLEX
    font_color = (0x8000, 0x8000, 0x0000)

    len_list = common.equal_devision(scale_height, scale_step)
    v_st = 0
    val_list = np.linspace(0, 2**bit_depth, scale_step)
    val_list[-1] -= 1
    luminance = get_bt2100_pq_curve(val_list / ((2**bit_depth)-1))

    for idx, x in enumerate(len_list):
        pos = (scale_width + text_offset_h, text_offset_v + v_st)
        v_st += x
        if luminance[idx] < 999.99999:
            text = "{:>4.0f},{:>6.1f}".format(val_list[idx], luminance[idx])
        else:
            text = "{:>4.0f},{:>5.0f}".format(val_list[idx], luminance[idx])
        cv2.putText(img, text, pos, font, text_scale, font_color)

    # add explanatory marker & text
    # -------------------------------
    pos = (scale_width + text_offset_h + text_d_offset, text_offset_v)
    text = "<-- VideoLevel, Brightness for PQ Curve."
    cv2.putText(img, text, pos, font, text_scale, font_color)
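Likewise, get_bt2100_pq_curve() is not shown in this excerpt, but it presumably returns absolute luminance in cd/m2 (note that, unlike the HLG variant above, the result is not multiplied by 1000 here). A standalone sketch using the SMPTE ST 2084 / BT.2100 PQ EOTF:

import numpy as np

def pq_eotf_sketch(x):
    """PQ (ST 2084) EOTF: signal in [0, 1] -> luminance in cd/m2."""
    m1 = 2610 / 16384
    m2 = 2523 / 4096 * 128
    c1 = 3424 / 4096
    c2 = 2413 / 4096 * 32
    c3 = 2392 / 4096 * 32
    x = np.asarray(x, dtype=np.float64) ** (1 / m2)
    return 10000 * (np.maximum(x - c1, 0) / (c2 - c3 * x)) ** (1 / m1)

# mirrors the luminance column computed above (bit_depth = 10)
val_list = np.linspace(0, 1024, 65)
val_list[-1] -= 1
luminance = pq_eotf_sketch(val_list / 1023)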
Code Example #10
def gen_rgbmyc_color_bar(img, width, height):
    # parameter setup
    # ----------------------
    rate = height / 2160
    bar_width = int(2048 * rate)
    bar_total_height = int(256 * rate)
    h_st = width // 2 - bar_width // 2
    v_st = height - bar_total_height - 1
    marker_width = int(30 * rate) + 1
    marker_height = int(20 * rate) + 1
    mk_space_v = int(32 * rate)
    scale_step = 65
    color_list = [(1, 0, 0), (0, 1, 0), (0, 0, 1),
                  (1, 0, 1), (1, 1, 0), (0, 1, 1)]

    # build the color bar
    # ----------------------
    bar_height_list = common.equal_devision(bar_total_height, 6)
    bar_img_list = []
    for color, bar_height in zip(color_list, bar_height_list):
        color_bar = gen_step_gradation(width=bar_width, height=bar_height,
                                       step_num=scale_step, bit_depth=10,
                                       color=color, direction='h')
        bar_img_list.append(color_bar)
    color_bar = np.vstack(bar_img_list)

    img[v_st:v_st+bar_total_height, h_st:h_st+bar_width] = color_bar

    # prepare the markers
    # ----------------------
    marker = make_marker(marker_width, marker_height, rotate=0)
    vst1 = v_st - mk_space_v
    ved1 = v_st - mk_space_v + marker_height
    hst1 = h_st - (marker_width // 2) - 1
    hed1 = h_st - (marker_width // 2) - 1 + marker_width
    img[vst1:ved1, hst1:hed1] = marker
    vst3 = v_st - mk_space_v
    ved3 = v_st - mk_space_v + marker_height
    hst3 = h_st - (marker_width // 2) - 1 + bar_width
    hed3 = h_st - (marker_width // 2) - 1 + marker_width + bar_width
    img[vst3:ved3, hst3:hed3] = marker
Code Example #11
def gen_step_gradation(width=1024, height=128, step_num=17,
                       bit_depth=10, color=(1.0, 1.0, 1.0),
                       direction='h', debug=False):
    """
    # Overview
    Create a gradation pattern that changes in steps.
    By adjusting the arguments, a smooth gradation can be created as well.

    # Notes
    To create a smooth gradation that changes by exactly one code value
    per step, set the parameters so that
    ```step_num == (2 ** bit_depth) + 1```
    holds (this is the condition checked in the code below).

    """
    max = 2 ** bit_depth

    # set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp

    # determine whether this is a stepwise gradation
    # -------------------------------------
    if (max + 1 != step_num):
        val_list = np.linspace(0, max, step_num)
        # otherwise the last value would be 256 or 1024, so subtract 1
        # --------------------------------------------------
        val_list[-1] -= 1
    else:
        """
        滑らかに変化させる場合は末尾のデータが 256 や 1024 に
        なるため除外する。
        """
        val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # the caller passes step_num with an extra +1, so subtract it

        # sanity check: make sure the values change by exactly one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")

    # first build a one-line horizontal gradation
    # -----------------------------------
    step_length_list = common.equal_devision(width, step_num)
    step_bar_list = []
    for step_idx, length in enumerate(step_length_list):
        step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
                for c_idx in range(3)]
        if direction == 'h':
            step = np.dstack(step)
            step_bar_list.append(step)
            step_bar = np.hstack(step_bar_list)
        else:
            step = np.dstack(step).reshape((length, 1, 3))
            step_bar_list.append(step)
            step_bar = np.vstack(step_bar_list)

    # expand to 2D using broadcasting
    # ------------------------------------------
    if direction == 'h':
        img = step_bar * np.ones((height, 1, 3))
    else:
        img = step_bar * np.ones((1, height, 3))

    # convert to np.uint16
    # ------------------------------
    img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))

    if debug:
        preview_image(img, 'rgb')

    return img
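A small numeric check of the smooth-gradation condition described in the docstring (only numpy is assumed):

import numpy as np

bit_depth = 8
step_num = (2 ** bit_depth) + 1                        # 257: the smooth case
val_list = np.linspace(0, 2 ** bit_depth, step_num)[0:-1]
assert (np.diff(val_list) == 1).all()                  # 0, 1, 2, ..., 255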
Code Example #12
def gen_step_gradation(width=1024, height=128, step_num=17,
                       bit_depth=10, color=(1.0, 1.0, 1.0),
                       direction='h', debug=False):
    """
    # Overview
    Create a gradation pattern that changes in steps.
    By adjusting the arguments, a pattern that changes by exactly
    one code value per step can also be created.

    # Notes
    To create a gradation that changes by exactly one code value per step,
    set the parameters so that
    ```step_num = (2 ** bit_depth) + 1```
    holds. See the Example below for concrete calls.

    # Example
    ```
    grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
                                step_num=257, bit_depth=8,
                                color=(1.0, 1.0, 1.0), direction='h')

    grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
                                 step_num=1025, bit_depth=10,
                                 color=(1.0, 1.0, 1.0), direction='h')
    ```
    """
    max = 2 ** bit_depth

    # set the gradation direction
    # ----------------------
    if direction == 'h':
        pass
    else:
        temp = height
        height = width
        width = temp

    if (max + 1 != step_num):
        """
        1階調ずつの増加では無いパターン。
        末尾のデータが 256 や 1024 になるため -1 する。
        """
        val_list = np.linspace(0, max, step_num)
        val_list[-1] -= 1
    else:
        """
        正確に1階調ずつ変化するパターン。
        末尾のデータが 256 や 1024 になるため除外する。
        """
        val_list = np.linspace(0, max, step_num)[0:-1]
        step_num -= 1  # the caller passes step_num with an extra +1, so subtract it

        # sanity check: make sure the values change by exactly one code value
        # ---------------------------
        diff = val_list[1:] - val_list[0:-1]
        if (diff == 1).all():
            pass
        else:
            raise ValueError("calculated value is invalid.")

    # first build a one-line horizontal gradation
    # -----------------------------------
    step_length_list = common.equal_devision(width, step_num)
    step_bar_list = []
    for step_idx, length in enumerate(step_length_list):
        step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
                for c_idx in range(3)]
        if direction == 'h':
            step = np.dstack(step)
            step_bar_list.append(step)
            step_bar = np.hstack(step_bar_list)
        else:
            step = np.dstack(step).reshape((length, 1, 3))
            step_bar_list.append(step)
            step_bar = np.vstack(step_bar_list)

    # expand to 2D using broadcasting
    # ------------------------------------------
    if direction == 'h':
        img = step_bar * np.ones((height, 1, 3))
    else:
        img = step_bar * np.ones((1, height, 3))

    # convert to np.uint16 (left commented out in this variant)
    # ------------------------------
    # img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))

    if debug:
        preview_image(img, 'rgb')

    return img
Code Example #13
def composite_pq_clip_checker(img):
    """
    Create a test pattern for checking where the PQ curve clips.
    Four variants are generated: 300 nits, 500 nits, 1000 nits and 4000 nits.

    Parameters
    ----------
    img : array_like
        image data. shape must be (V_num, H_num, 3).

    """
    global g_cuurent_pos_v
    img_width = img.shape[1]
    img_height = img.shape[0]
    vertual_width = (img_width // 1920) * 1920
    center_bright_list = [300, 500, 1000, 4000]
    level_num = 4
    level_step = 8
    width = int(img_width * PQ_CLIP_CHECKER_WIDTH)
    height = int(img_height * PQ_CLIP_CHECKER_HEIGHT)
    text_width = int(vertual_width * PQ_CLIP_CHECKER_DESC_TEXT_WIDTH)

    module_st_h = _get_center_obj_h_start(img)
    module_st_v = g_cuurent_pos_v + int(img_height * EXTERNAL_PADDING_V)

    h_offset = img_width - (2 * module_st_h) - width - text_width
    h_offset = cmn.equal_devision(h_offset, len(center_bright_list) - 1)

    font_size = int(height / (level_num * 2 + 1) / 96 * 72)

    img_list = []
    text_img_list = []
    for bright in center_bright_list:
        level_temp\
            = _get_pq_video_levels_for_clip_check(bright=bright,
                                                  level_num=level_num,
                                                  level_step=level_step)
        img_temp = _make_block(width, height, level_temp)
        bright_temp = colour.eotf(level_temp / 1024, 'ITU-R BT.2100 PQ')
        level_temp = level_temp[:, 0]
        bright_temp = bright_temp[:, 0]
        text_info = np.dstack((level_temp, bright_temp))
        text_info = text_info.reshape((text_info.shape[1], 2))
        img_list.append(img_temp)
        text_temp = gen_video_level_text_img(width=text_width,
                                             height=height,
                                             font_size=font_size,
                                             text_info=text_info)
        text_img_list.append(text_temp)

    # layout
    # -------------------------------------
    for idx in range(len(img_list)):
        sum_h = 0
        for o_idx in range(idx):
            sum_h += h_offset[o_idx]
        st_h = module_st_h + sum_h
        st_v = module_st_v
        ed_h = st_h + width
        ed_v = st_v + height
        img[st_v:ed_v, st_h:ed_h] = img_list[idx]

        text_st_h = st_h + width
        text_ed_h = text_st_h + text_width
        text_st_v = st_v
        text_ed_v = ed_v
        img[text_st_v:text_ed_v, text_st_h:text_ed_h] = text_img_list[idx]

        text_pos_h = st_h
        text_height, font_size = _get_text_height_and_font_size(img_height)
        text_pos_v = st_v - text_height
        text = "▼ {}nits PQ clip check".format(center_bright_list[idx])
        _add_text_info(img,
                       st_pos=(text_pos_h, text_pos_v),
                       font_size=font_size,
                       text=text,
                       font_color=(0.4, 0.4, 0.4))

    # update the current V position
    # -------------------------------------------
    g_cuurent_pos_v = ed_v