import colour
from colour.utilities import message_box

C = 18 / 100

message_box(
    f'Encoding to video component signal value using "BT.709" OETF and given '
    f"linear-light value:\n\n\t{C}"
)
print(colour.oetf(C, function="ITU-R BT.709"))
print(colour.models.oetf_BT709(C))
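
# As a cross-check, the BT.709 OETF can be evaluated directly from its
# piecewise definition (a minimal sketch; "bt709_oetf_reference" is a
# hypothetical helper, not part of colour, assuming a scalar linear-light
# input in [0, 1]):
def bt709_oetf_reference(L):
    # V = 4.5 * L below the linear threshold, a 0.45 power law above it.
    return 4.5 * L if L < 0.018 else 1.099 * L ** 0.45 - 0.099

print(bt709_oetf_reference(0.18))  # ~0.40900773, matching the calls above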

print("\n")

N = 0.40900773
message_box(
    f'Decoding to linear-light value using "BT.1886" EOTF and given video '
    f"component signal value:\n\n\t{N}"
)
print(colour.eotf(N, function="ITU-R BT.1886"))
print(colour.models.eotf_BT1886(N))
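
# With the default L_B = 0 and L_W = 1, the BT.1886 EOTF reduces to a pure
# 2.4 power function (a minimal sketch of that special case;
# "bt1886_eotf_reference" is a hypothetical helper, not part of colour):
def bt1886_eotf_reference(V, gamma=2.4):
    # L = a * max(V + b, 0) ** gamma, with a = 1 and b = 0 in this case.
    return max(V, 0) ** gamma

print(bt1886_eotf_reference(0.40900773))  # ~0.117, matching the calls above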

print("\n")

message_box(f'Encoding to "Cineon" using given linear-light value:\n\n\t{C}')
print(colour.log_encoding(C, function="Cineon"))
print(colour.models.log_encoding_Cineon(C))
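
# The Cineon encoding can likewise be reproduced from its 10-bit log curve,
# where code value 95 is black and 685 is reference white (a minimal sketch;
# "cineon_encoding_reference" is a hypothetical helper using the commonly
# published Cineon constants):
import math

def cineon_encoding_reference(x, black_offset=10 ** ((95 - 685) / 300)):
    # Normalise the 10-bit log code value to [0, 1] by dividing by 1023.
    return (685 + 300 * math.log10(x * (1 - black_offset) + black_offset)) / 1023

print(cineon_encoding_reference(0.18))  # ~0.45731961, matching the calls above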

print("\n")

N = 0.45731961
message_box(
    f'Decoding to linear-light using given "Cineon" code value:\n\n\t{N}'
)
print(colour.log_decoding(N, function="Cineon"))
print(colour.models.log_decoding_Cineon(N))
Example #2
import colour
from colour.utilities import message_box

C = 18 / 100

message_box(('Encoding to video component signal value using "BT.709" OETF '
             'and given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.oetf(C, function='ITU-R BT.709'))
print(colour.models.oetf_BT709(C))

print('\n')

N = 0.40900773
message_box(('Decoding to linear-light value using "BT.1886" EOTF and given '
             'video component signal value:\n'
             '\n\t{0}'.format(N)))
print(colour.eotf(N, function='ITU-R BT.1886'))
print(colour.models.eotf_BT1886(N))

print('\n')

message_box(('Encoding to "Cineon" using given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.log_encoding(C, function='Cineon'))
print(colour.models.log_encoding_Cineon(C))

print('\n')

N = 0.45731961
message_box(('Decoding to linear-light using given "Cineon" code value:\n'
             '\n\t{0}'.format(N)))
print(colour.log_decoding(N, function='Cineon'))
print(colour.models.log_decoding_Cineon(N))
Example #3
import colour
from colour.utilities import message_box

C = int(input("Enter a % value: "))
C = C / 100

message_box(('Encoding to video component signal value using "BT.709" OETF '
             'and given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.models.oetf_BT709(C))
print(colour.oetf(C, function='ITU-R BT.709'))

print('\n')

N = 0.40900773
message_box(('Decoding to linear-light value using "BT.1886" EOTF and given '
             'video component signal value:\n'
             '\n\t{0}'.format(N)))
print(colour.models.eotf_BT1886(N))
print(colour.eotf(N, function='ITU-R BT.1886'))

print('\n')

N = float(input("Enter a value: "))

# N = 0.40900773
message_box(('Decoding to linear-light value using "PQ" EOTF and given '
             'video component signal value:\n'
             '\n\t{0}'.format(N)))
print(colour.models.eotf_ST2084(N))
print(colour.eotf(N, function='ST 2084', L_p=10000))
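
# The "ST 2084" decoding can be cross-checked against the published PQ EOTF
# (a minimal sketch; "st2084_eotf_reference" is a hypothetical helper using
# the SMPTE ST 2084 constants):
def st2084_eotf_reference(N, L_p=10000):
    m_1 = 2610 / 4096 / 4
    m_2 = 2523 / 4096 * 128
    c_1 = 3424 / 4096
    c_2 = 2413 / 4096 * 32
    c_3 = 2392 / 4096 * 32
    V_p = N ** (1 / m_2)
    # Display luminance in cd/m^2 for a non-linear signal value N in [0, 1].
    return L_p * (max(V_p - c_1, 0) / (c_2 - c_3 * V_p)) ** (1 / m_1)

print(st2084_eotf_reference(0.5))  # ~92 cd/m^2 at mid signal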

print('\n')
C = 18 / 100

message_box(('Encoding to video component signal value using "BT.709" OETF '
             'and given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.oetf(C, function='ITU-R BT.709'))
print(colour.models.oetf_BT709(C))

print('\n')

N = 0.40900773
message_box(('Decoding to linear-light value using "BT.1886" EOTF and given '
             ' video component signal value:\n'
             '\n\t{0}'.format(N)))
print(colour.eotf(N, function='ITU-R BT.1886'))
print(colour.models.eotf_BT1886(N))

print('\n')

message_box(('Encoding to "Cineon" using given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.log_encoding_curve(C, curve='Cineon'))
print(colour.models.log_encoding_Cineon(C))

print('\n')

N = 0.45731961
message_box(('Decoding to linear-light using given "Cineon" code value:\n'
             '\n\t{0}'.format(N)))
print(colour.log_decoding_curve(N, curve='Cineon'))
Example #5
def composite_hlg_vertical_gray_scale(img):
    """
    execute the composition processing for the virtical hlg gradation.

    Parameters
    ----------
    img : array_like
        image data. shape is must be (V_num, H_num, 3).

    Returns
    -------
    ndarray
        a image with pq gray scale.

    Notes
    -----
    -

    Examples
    --------
    >>> img = np.zeros((1080, 1920, 3), np.dtype=uint8)
    >>> composite_hlg_vertical_gray_scale(img)
    """
    # Create basic information
    # ------------------------------------------------------
    img_width = img.shape[1]
    img_height = img.shape[0]
    vertual_width = (img_width // 1920) * 1920
    scale_width = int(vertual_width * SIDE_V_GRADATION_WIDTH)
    scale_height = img_height - 2  # "-2" accounts for the frame border pixels
    text_width = int(vertual_width * SIDE_V_GRADATION_TEXT_WIDTH)
    text_height = img_height - 2  # "-2" accounts for the frame border pixels
    bit_depth = 10
    video_max = (2**bit_depth) - 1
    step_num = 65

    # Create the HLG gradation
    # ------------------------------------------------------
    scale = tpg.gen_step_gradation(width=scale_width,
                                   height=scale_height,
                                   step_num=step_num,
                                   color=(1.0, 1.0, 1.0),
                                   direction='v',
                                   bit_depth=bit_depth)
    h_st = img_width - 1 - scale_width
    h_ed = -1
    img[0 + 1:scale_height + 1, h_st:h_ed] = scale

    # Display the video levels and luminance
    # ------------------------------------------------------
    video_level = [
        x * (2**bit_depth) // (step_num - 1) for x in range(step_num)
    ]
    video_level[-1] -= 1  # the last value is one too high, so subtract it
    video_level_float = np.array(video_level) / video_max
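    # Convert the normalised 10-bit code values to display luminance in
    # cd/m^2 with the BT.2100 HLG EOTF (nominal peak L_W = 1000, SG = 1.2).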
    bright = colour.eotf(video_level_float,
                         'ITU-R BT.2100 HLG',
                         L_W=1000,
                         gamma=1.2)
    text_info = np.dstack((video_level, bright)).reshape((bright.shape[0], 2))
    font_size = int(text_height / step_num / 96 * 72)
    txt_img = gen_video_level_text_img(width=text_width,
                                       height=text_height,
                                       font_size=font_size,
                                       text_info=text_info)
    h_st = img_width - (text_width + scale_width)
    h_ed = h_st + text_width
    img[0 + 1:text_height + 1, h_st:h_ed, :] = txt_img

    # Add the descriptive text
    # ------------------------------------------------------
    text_pos_h = h_st - int(
        vertual_width * SIDE_V_GRADATION_DESC_TEXT_WIDTH_HLG)
    text_height, font_size = _get_text_height_and_font_size(img_height)
    text_pos_v = int(img_height * SIDE_V_GRADATION_DESC_TEXT_V_OFFSET)
    text = "HLG(SG=1.2) video level(10bit) and luminance(nits) ▶"
    _add_text_info(img,
                   st_pos=(text_pos_h, text_pos_v),
                   font_size=font_size,
                   text=text,
                   font_color=(0.4, 0.4, 0.4))
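
# A minimal usage sketch for the BT.2100 HLG EOTF call above: mapping a
# single full-range signal value to display luminance, assuming the same
# L_W = 1000 and system gamma of 1.2 as in the function:
import colour

print(colour.eotf(1.0, 'ITU-R BT.2100 HLG', L_W=1000, gamma=1.2))
# ~1000 cd/m^2 at peak signal.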
Example #6
def composite_pq_clip_checker(img):
    """
    PQカーブのクリップ位置を確認するためのテストパターンを作る。
    300nits, 500nits, 1000nits, 4000nits の 4パターン作成

    Parameters
    ----------
    img : array_like
        image data. shape is must be (V_num, H_num, 3).

    """
    global g_cuurent_pos_v
    img_width = img.shape[1]
    img_height = img.shape[0]
    vertual_width = (img_width // 1920) * 1920
    center_bright_list = [300, 500, 1000, 4000]
    level_num = 4
    level_step = 8
    width = int(img_width * PQ_CLIP_CHECKER_WIDTH)
    height = int(img_height * PQ_CLIP_CHECKER_HEIGHT)
    text_width = int(vertual_width * PQ_CLIP_CHECKER_DESC_TEXT_WIDTH)

    module_st_h = _get_center_obj_h_start(img)
    module_st_v = g_cuurent_pos_v + int(img_height * EXTERNAL_PADDING_V)

    h_offset = img_width - (2 * module_st_h) - width - text_width
    h_offset = cmn.equal_devision(h_offset, len(center_bright_list) - 1)

    font_size = int(height / (level_num * 2 + 1) / 96 * 72)

    img_list = []
    text_img_list = []
    for bright in center_bright_list:
        level_temp = _get_pq_video_levels_for_clip_check(
            bright=bright, level_num=level_num, level_step=level_step)
        img_temp = _make_block(width, height, level_temp)
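        # Convert the normalised PQ code values to display luminance in
        # cd/m^2 with the BT.2100 PQ EOTF.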
        bright_temp = colour.eotf(level_temp / 1024, 'ITU-R BT.2100 PQ')
        level_temp = level_temp[:, 0]
        bright_temp = bright_temp[:, 0]
        text_info = np.dstack((level_temp, bright_temp))
        text_info = text_info.reshape((text_info.shape[1], 2))
        img_list.append(img_temp)
        text_temp = gen_video_level_text_img(width=text_width,
                                             height=height,
                                             font_size=font_size,
                                             text_info=text_info)
        text_img_list.append(text_temp)

    # Layout the blocks and their text
    # -------------------------------------
    for idx in range(len(img_list)):
        sum_h = 0
        for o_idx in range(idx):
            sum_h += h_offset[o_idx]
        st_h = module_st_h + sum_h
        st_v = module_st_v
        ed_h = st_h + width
        ed_v = st_v + height
        img[st_v:ed_v, st_h:ed_h] = img_list[idx]

        text_st_h = st_h + width
        text_ed_h = text_st_h + text_width
        text_st_v = st_v
        text_ed_v = ed_v
        img[text_st_v:text_ed_v, text_st_h:text_ed_h] = text_img_list[idx]

        text_pos_h = st_h
        text_height, font_size = _get_text_height_and_font_size(img_height)
        text_pos_v = st_v - text_height
        text = "▼ {}nits PQ clip check".format(center_bright_list[idx])
        _add_text_info(img,
                       st_pos=(text_pos_h, text_pos_v),
                       font_size=font_size,
                       text=text,
                       font_color=(0.4, 0.4, 0.4))

    # Update the current vertical position
    # -------------------------------------------
    g_cuurent_pos_v = ed_v
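
# A minimal sketch of the inverse mapping behind the clip check: the
# normalised PQ signal at which each target luminance sits (assuming a
# colour version that provides colour.eotf_inverse):
import colour

for nits in [300, 500, 1000, 4000]:
    code = colour.eotf_inverse(nits, 'ITU-R BT.2100 PQ')
    print(nits, round(float(code) * 1023))  # approximate 10-bit code value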