def _get_pq_video_levels_for_clip_check(bright=1000, level_num=4, level_step=4):
    """
    Build a symmetric set of PQ (ST 2084) video levels centred on the
    10-bit code value that corresponds to ``bright``.

    Parameters
    ----------
    bright : numeric
        target brightness (presumably in cd/m2) whose PQ code value
        becomes the centre level.
    level_num : numeric
        number of levels generated on EACH side of the centre
        (total count is ``level_num * 2 + 1``).
    level_step : numeric
        distance between neighbouring video levels.

    Returns
    -------
    array_like
        video levels, reshaped for 2D use by ``_reshape_for_2d``.

    Examples
    --------
    >>> _get_pq_video_levels_for_clip_check(1000, 4, 4)
    [769-16, 769-12, 769-8, 769-4, 769, 769+4, 769+8, 769+12, 769+16]
    """
    # Centre code value on a 10-bit (0-1023) scale.
    centre = np.uint16(np.round(colour.oetf(bright, function="ST 2084") * 1023))
    total = level_num * 2 + 1
    levels = [centre + level_step * (offset - level_num) for offset in range(total)]
    return _reshape_for_2d(levels)
def apply_oetf(source: list[float], luma: float):
    """
    Encode a linear RGB triplet with the OETF selected via command-line args.

    args:
        source: linear RGB tuple (0-1, 0-1, 0-1)
        luma: luma value for the ORIGINAL sRGB colour.
    """
    # NOTE: re-parses the command line on every call; gamma selection
    # comes from the parsed args, not from a parameter.
    options = parse_args()
    encoded_pq = colour.oetf(source, 'ITU-R BT.2100 PQ')
    encoded_hlg = colour.oetf(source, 'ITU-R BT.2100 HLG')
    if options.gamma == 'pq':
        return encoded_pq
    if options.gamma == 'hlg':
        return encoded_hlg
    # linear mix between 0.1 and 0.2
    blend = (np.clip(luma, 0.1, 0.2) - 0.1) / 0.1
    return encoded_hlg * (1 - blend) + encoded_pq * blend
def composite_pq_color_checker(img): """ SDRレンジのカラーチェッカーをPQ用に作成&貼り付け Parameters ---------- img : array_like image data. shape is must be (V_num, H_num, 3). """ # 基本情報 # -------------------------------------- img_width = img.shape[1] img_height = img.shape[0] h_num = 4 v_num = 6 patch_width = int(img_height * COLOR_CHECKER_SIZE) patch_height = patch_width patch_space = int(img_height * COLOR_CHECKER_PADDING) patch_st_h = int(img_width * COLOR_CHECKER_H_START) patch_st_v = int(img_height * COLOR_CHECKER_V_START) xyY = np.array(tpg.const_color_checker_xyY) xyY = xyY.reshape((1, xyY.shape[0], xyY.shape[1])) rgb = cc.xyY_to_RGB(xyY=xyY, gamut=cc.const_rec2020_xy, white=cc.const_d65_large_xyz) rgb[rgb < 0] = 0 # scale to 100 nits (rgssb/100). # rgb = np.uint16(np.round(cc.linear_to_pq(rgb/100) * 0xFFFF)) rgb = colour.oetf(rgb * 100, function="ST 2084") * 0xFFC0 rgb = np.uint16(np.round(rgb)) for idx in range(h_num * v_num): h_idx = idx // v_num v_idx = v_num - (idx % v_num) - 1 patch = np.ones((patch_height, patch_width, 3), dtype=np.uint16) patch[:, :] = rgb[0][idx] st_h = patch_st_h + (patch_width + patch_space) * h_idx st_v = patch_st_v + (patch_height + patch_space) * v_idx img[st_v:st_v + patch_height, st_h:st_h + patch_width] = patch text_pos_h = patch_st_h text_height, font_size = _get_text_height_and_font_size(img_height) text_pos_v = st_v - text_height text = "▼ ColorChecker for ST2084" _add_text_info(img, st_pos=(text_pos_h, text_pos_v), font_size=font_size, text=text, font_color=(0.4, 0.4, 0.4))
def composite_pq_ebu_test_colour(img): """ SDRレンジのEBU TEST COLOR をPQ用に作成&貼り付け Parameters ---------- img : array_like image data. shape is must be (V_num, H_num, 3). """ # 基本情報 # -------------------------------------- img_width = img.shape[1] img_height = img.shape[0] h_num = 3 v_num = 5 patch_width = int(img_height * EBU_TEST_COLOR_SIZE) patch_height = patch_width patch_space = int(img_height * EBU_TEST_COLOR_PADDING) patch_st_h = int(img_width * EBU_TEST_COLOR_H_START) patch_st_v = int(img_height * EBU_TEST_COLOR_V_START) rgb = _get_ebu_color_rgb_from_XYZ() rgb = colour.oetf(rgb * 100, function="ST 2084") * 0xFFC0 rgb = np.uint16(np.round(rgb)) for idx in range(h_num * v_num): h_idx = idx // v_num v_idx = v_num - (idx % v_num) - 1 patch = np.ones((patch_height, patch_width, 3), dtype=np.uint16) patch[:, :] = rgb[0][idx] st_h = patch_st_h + (patch_width + patch_space) * h_idx st_v = patch_st_v + (patch_height + patch_space) * v_idx img[st_v:st_v + patch_height, st_h:st_h + patch_width] = patch text_pos_h = patch_st_h text_height, font_size = _get_text_height_and_font_size(img_height) text_pos_v = st_v - text_height text = "▼ EBU TEST COLOUR for PQ" _add_text_info(img, st_pos=(text_pos_h, text_pos_v), font_size=font_size, text=text, font_color=(0.4, 0.4, 0.4))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Encode a user-supplied linear-light value with the "BT.709" OETF."""

import colour
from colour.utilities.verbose import message_box

# Linear-light value to encode (e.g. 18 / 100 for 18% grey).
# BUG FIX: input() returns a str; the OETF functions below need a numeric
# value, so convert it before use.
C = float(input("Enter a 10bit Value: "))

message_box(('Encoding to video component signal value using "BT.709" OETF '
             'and given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.oetf_BT709(C))
print(colour.oetf(C, function='BT.709'))
print('\n')
"""Showcases colour component transfer functions (CCTF) relates computations.""" import colour from colour.utilities import message_box message_box("Colour Component Transfer Functions (CCTF) Computations") C = 18 / 100 message_box( f'Encoding to video component signal value using "BT.709" OETF and given ' f"linear-light value:\n\n\t{C}" ) print(colour.oetf(C, function="ITU-R BT.709")) print(colour.models.oetf_BT709(C)) print("\n") N = 0.40900773 message_box( f'Decoding to linear-light value using "BT.1886" EOTF and given video ' f"component signal value:\n\n\t{N}" ) print(colour.eotf(N, function="ITU-R BT.1886")) print(colour.models.eotf_BT1886(N)) print("\n") message_box(f'Encoding to "Cineon" using given linear-light value:\n\n\t{C}') print(colour.log_encoding(C, function="Cineon")) print(colour.models.log_encoding_Cineon(C))
# -*- coding: utf-8 -*-
"""
Showcases colour component transfer functions (CCTF) relates computations.
"""

import colour
from colour.utilities import message_box

message_box('Colour Component Transfer Functions (CCTF) Computations')

# 18% grey linear-light value.
C = 18 / 100
message_box(('Encoding to video component signal value using "BT.709" OETF '
             'and given linear-light value:\n'
             '\n\t{0}'.format(C)))
print(colour.oetf(C, function='ITU-R BT.709'))
print(colour.models.oetf_BT709(C))

print('\n')

# Video component signal value to decode back to linear light.
N = 0.40900773
message_box(('Decoding to linear-light value using "BT.1886" EOTF and given '
             ' video component signal value:\n'
             '\n\t{0}'.format(N)))
print(colour.eotf(N, function='ITU-R BT.1886'))
print(colour.models.eotf_BT1886(N))

print('\n')

message_box(('Encoding to "Cineon" using given linear-light value:\n'
             '\n\t{0}'.format(C)))
# BUG FIX: the message above announced a "Cineon" encoding but nothing was
# printed; emit the values like the other sections do.
print(colour.log_encoding(C, function='Cineon'))
print(colour.models.log_encoding_Cineon(C))
""" Showcases colour component transfer functions (CCTF) relates computations. """ import colour from colour.utilities.verbose import message_box message_box('Colour Component Transfer Functions (CCTF) Computations') C = 18 / 100 message_box(('Encoding to video component signal value using "BT.709" OETF ' 'and given linear-light value:\n' '\n\t{0}'.format(C))) print(colour.oetf_BT709(C)) print(colour.oetf(C, function='BT.709')) print('\n') N = 0.40900773 message_box(('Decoding to linear-light value using "BT.1886" EOTF and given ' ' video component signal value:\n' '\n\t{0}'.format(N))) print(colour.eotf_BT1886(N)) print(colour.eotf(N, function='BT.1886')) print('\n') message_box(('Encoding to "Cineon" using given linear-light value:\n' '\n\t{0}'.format(C))) print(colour.log_encoding_Cineon(C))
RESOURCES_DIRECTORY = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'resources') colour_style() ISHIHARA_CBT_3_IMAGE = colour.oetf_reverse( colour.read_image( os.path.join(RESOURCES_DIRECTORY, 'Ishihara_Colour_Blindness_Test_Plate_3.png')), function='sRGB') message_box('Colour Blindness Plots') message_box('Displaying "Ishihara Colour Blindness Test - Plate 3".') plot_image(colour.oetf(ISHIHARA_CBT_3_IMAGE), text_parameters={'text': 'Normal Trichromat', 'color': 'black'}) print('\n') message_box('Simulating average "Protanomaly" on ' '"Ishihara Colour Blindness Test - Plate 3" with Machado (2010) ' 'model and pre-computed matrix.') plot_cvd_simulation_Machado2009( ISHIHARA_CBT_3_IMAGE, 'Protanomaly', 0.5, text_parameters={'text': 'Protanomaly - 50%', 'color': 'black'}) print('\n') M_a = colour.anomalous_trichromacy_matrix_Machado2009( colour.LMS_CMFS.get('Stockman & Sharpe 2 Degree Cone Fundamentals'),
def composite_bt2020_check_pattern(img):
    """
    Composite a pattern for checking gamut clipping against BT.2020
    onto ``img`` in place (BT.2020 primaries vs DCI-P3 primaries as
    striped patches).

    Parameters
    ----------
    img : array_like
        image data. shape is must be (V_num, H_num, 3).
    """
    # Module-level vertical cursor (sic: name is misspelled at module scope);
    # read for placement, advanced at the end of this function.
    global g_cuurent_pos_v
    img_width = img.shape[1]
    img_height = img.shape[0]
    module_st_h = _get_center_obj_h_start(img)
    module_st_v = g_cuurent_pos_v + int(img_height * EXTERNAL_PADDING_V)
    width = int(img_height * CSF_COLOR_PATTERN_WIDTH)
    height = int(img_height * CSF_COLOR_PATTERN_HEIGHT)
    stripe_num = CSF_COLOR_STRIPE_NUM
    # Compute the video levels for the primary-colour stripes:
    # DCI-P3 primaries expressed in BT.2020, clipped to [0, 1], then both
    # sets scaled to 100 nits and PQ-encoded to 10-bit codes.
    # ----------------------------------
    rgb_dci = _get_dci_primary_on_bt2020() * 0.01  # 100 nits
    rgb_dci[rgb_dci < 0] = 0.0
    rgb_dci[rgb_dci > 1] = 1.0
    rgb_2020 = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    rgb_2020 = np.array(rgb_2020) * 0.01  # 100 nits
    # ST 2084 OETF takes absolute luminance; 0.01 * 10000 = 100 cd/m2.
    rgb_2020 = colour.oetf(rgb_2020 * 10000, 'ST 2084')
    rgb_dci = colour.oetf(rgb_dci * 10000, 'ST 2084')
    rgb_2020 = np.uint16(np.round(rgb_2020 * 0x3FF))
    rgb_dci = np.uint16(np.round(rgb_dci * 0x3FF))
    # One striped image per channel (R, G, B), alternating 2020/DCI levels.
    rgb_img = [
        _make_csf_color_image(width=width, height=height,
                              lv1=rgb_2020[idx], lv2=rgb_dci[idx],
                              stripe_num=stripe_num)
        for idx in range(3)
    ]
    # Place the red patch (left)
    # ---------------------------------------
    st_v = module_st_v
    ed_v = st_v + height
    st_h = module_st_h
    ed_h = st_h + width
    img[st_v:ed_v, st_h:ed_h] = rgb_img[0]
    text_pos_h = st_h
    text_height, font_size = _get_text_height_and_font_size(img_height)
    text_pos_v = st_v - text_height
    # NOTE: the same caption is reused for all three patches below.
    text = "BT.2020/DCI-P3"
    _add_text_info(img, st_pos=(text_pos_h, text_pos_v), font_size=font_size,
                   text=text, font_color=(0.4, 0.4, 0.4))
    # Place the green patch (horizontally centred)
    # ---------------------------------------
    st_h = (img_width // 2) - (width // 2)
    ed_h = st_h + width
    img[st_v:ed_v, st_h:ed_h] = rgb_img[1]
    text_pos_h = st_h
    text_height, font_size = _get_text_height_and_font_size(img_height)
    text_pos_v = st_v - text_height
    _add_text_info(img, st_pos=(text_pos_h, text_pos_v), font_size=font_size,
                   text=text, font_color=(0.4, 0.4, 0.4))
    # Place the blue patch (right)
    # -------------------------------------------
    st_h = img_width - module_st_h - width
    ed_h = st_h + width
    img[st_v:ed_v, st_h:ed_h] = rgb_img[2]
    text_pos_h = st_h
    text_height, font_size = _get_text_height_and_font_size(img_height)
    text_pos_v = st_v - text_height
    _add_text_info(img, st_pos=(text_pos_h, text_pos_v), font_size=font_size,
                   text=text, font_color=(0.4, 0.4, 0.4))
    # Advance the shared vertical cursor past this module.
    g_cuurent_pos_v = ed_v