Esempio n. 1
0
def comparison_between_1000nits_and_108nits(
        min_exposure=-10.0, max_exposure=10.0, src_cs=cs.P3_D65):
    """
    Compare the 1000 nits and 108 nits ACES Output Transform results.

    Generalized over the source gamut: ``src_cs`` defaults to P3-D65,
    which preserves the original hard-coded behavior and matches the
    sibling ``comparison_between_1000nits_and_108nits_primaries``.

    Parameters
    ----------
    min_exposure : float
        minimum exposure of the ramp, in stops relative to ref_val=0.18.
    max_exposure : float
        maximum exposure of the ramp, in stops relative to ref_val=0.18.
    src_cs : color space definition (same type as ``cs.P3_D65``)
        color space the ramp data is interpreted in before the AP0
        conversion.

    Returns
    -------
    None.
    """
    x, img = make_wrgbmyc_ramp(
        sample_num=1920, ref_val=0.18,
        min_exposure=min_exposure, max_exposure=max_exposure)

    # convert src_cs --> AP0 (ACES2065-1) so ctlrender receives ACES data
    ap0_img = gamut_convert_linear_data(img, src_cs, cs.ACES_AP0, ca='CAT02')
    ramp_fname = "./wrgbmyc_ramp_for_dolby_cinema.exr"
    exr_file_write(ap0_img, ramp_fname)

    # run the 108 nits Output Transform with ctlrender
    ctl_list = [OUTPUT_TRANS_P3D65_108NITS_CTL]
    ot_108_img = get_after_ctl_image(ramp_fname, ctl_list)[0, :, :3]

    # run the 1000 nits Output Transform with ctlrender
    ctl_list = [OUTPUT_TRANS_BT2020_1000NITS_CTL]
    ot_1000_img = get_after_ctl_image(ramp_fname, ctl_list)[0, :, :3]

    # decode ST 2084 (PQ) code values to absolute luminance [cd/m2]
    ot_108_img = tf.eotf_to_luminance(ot_108_img, tf.ST2084)
    ot_1000_img = tf.eotf_to_luminance(ot_1000_img, tf.ST2084)
    _plot_comparison_between_1000nits_and_108nits(
        x, ot_108_img=ot_108_img, ot_1000_img=ot_1000_img,
        x_min_exposure=int(min_exposure), x_max_exposure=int(max_exposure))
Esempio n. 2
0
def make_cinema_picture_on_sRGB():
    """
    Re-encode the 1000 nits and 108 nits Output Transform results with
    the sRGB OETF so they can be previewed on an ordinary monitor.
    """
    # apply each Output Transform to the pre-made test pattern
    src_img_name = "./src_img/src_bt2020_to_ap0.exr"
    img_108 = get_after_ctl_image(
        src_img_name, [OUTPUT_TRANS_P3D65_108NITS_CTL])[:, :, :3]
    img_1000 = get_after_ctl_image(
        src_img_name, [OUTPUT_TRANS_BT2020_1000NITS_CTL])[:, :, :3]

    # back to linear light, normalized by each target's peak luminance,
    # then clipped to [0, 1]
    img_108 = np.clip(
        tf.eotf_to_luminance(img_108, tf.ST2084) / 108, 0.0, 1.0)
    img_1000 = np.clip(
        tf.eotf_to_luminance(img_1000, tf.ST2084) / 1000, 0.0, 1.0)

    # re-encode with the sRGB OETF
    img_108 = tf.oetf(img_108, tf.SRGB)
    img_1000 = tf.oetf(img_1000, tf.SRGB)

    # save as 8 bit PNG (channel reversal: RGB -> BGR for OpenCV)
    cv2.imwrite("./108_sRGB.png",
                np.uint8(np.round(img_108 * 0xFF))[:, :, ::-1])
    cv2.imwrite("./1000_sRGB.png",
                np.uint8(np.round(img_1000 * 0xFF))[:, :, ::-1])
Esempio n. 3
0
def gen_eotf_lut_for_ocio(eotf_name=tf.ST2084,
                          sample_num=4096,
                          out_dir_name='./luts'):
    """
    Generate an EOTF-to-linear 1DLUT for OpenColorIO.
    Any required matrix must be prepared separately!

    Parameters
    ----------
    eotf_name : strings
        A name of the eotf.
        select from **transfer_functions** module.
    sample_num : int
        sample number.
    out_dir_name : strings
        directory the .spi1d file is written to.

    Returns
    -------
        None.
    """
    code_values = np.linspace(0, 1, sample_num)
    linear = tf.eotf_to_luminance(code_values, eotf_name) \
        / tf.REF_WHITE_LUMINANCE
    fname = "{}/{}_to_Linear.spi1d".format(
        out_dir_name, eotf_name.replace(" ", "_"))
    tylut.save_1dlut_spi_format(lut=linear, filename=fname, min=0.0, max=1.0)
    print("{} was generated.".format(fname))
Esempio n. 4
0
def comparison_between_1000nits_and_108nits_primaries(
        min_exposure=-10.0, max_exposure=10.0, src_cs=cs.BT709):
    """
    Compare the 1000 nits and 108 nits Output Transform results,
    this time plotted per primary/secondary color.
    """
    x, img = make_wrgbmyc_ramp(
        sample_num=1920, ref_val=0.18,
        min_exposure=min_exposure, max_exposure=max_exposure)

    # convert src_cs --> AP0 so ctlrender receives ACES2065-1 data
    ramp_fname = "./wrgbmyc_ramp_for_dolby_cinema.exr"
    exr_file_write(
        gamut_convert_linear_data(img, src_cs, cs.ACES_AP0, ca='CAT02'),
        ramp_fname)

    # run both Output Transforms via ctlrender
    ot_108_img = get_after_ctl_image(
        ramp_fname, [OUTPUT_TRANS_P3D65_108NITS_CTL])[:, :, :3]
    ot_1000_img = get_after_ctl_image(
        ramp_fname, [OUTPUT_TRANS_BT2020_1000NITS_CTL])[:, :, :3]

    # decode ST 2084 (PQ) code values to absolute luminance
    ot_108_img = tf.eotf_to_luminance(ot_108_img, tf.ST2084)
    ot_1000_img = tf.eotf_to_luminance(ot_1000_img, tf.ST2084)

    # move the 1000 nits result from BT.2020 into the DCI-P3 gamut so it
    # matches the 108 nits result (normalize by 10000 for the conversion,
    # then scale back to luminance)
    ot_1000_img = gamut_convert_linear_data(
        ot_1000_img / 10000, cs.BT2020, cs.P3_D65, ca='CAT02') * 10000

    # plot range derived from the white (row 0) ramp, with margin
    min_value = np.min(ot_108_img[0, :, :]) / 2
    max_value = np.max(ot_1000_img[0, :, :]) * 2
    print(min_value, max_value)

    for color_idx in range(len(COLOR_NAME_LIST)):
        _plot_comparison_between_1000nits_and_108nits_primaries(
            x, color_idx, src_cs, min_value, max_value,
            ot_108_img=ot_108_img, ot_1000_img=ot_1000_img,
            x_min_exposure=int(min_exposure),
            x_max_exposure=int(max_exposure))
Esempio n. 5
0
    def get_video_level_text_img(self, scale_step, width, type=1):
        """
        Render the video-level & luminance annotation for the step color bar.

        The text column is drawn vertically first, then rotated at the end
        so it reads horizontally.

        Parameters
        ----------
        scale_step : int
            number of steps in the color bar (one text entry per step).
        width : int
            extent in pixels along the color bar; becomes the height of
            the vertically drawn text image.
        type : int
            tpg layout type (1 or 2); selects the text-column width factor.
            NOTE: shadows the ``type`` builtin, kept for caller
            compatibility.

        Returns
        -------
        ndarray
            the rotated text image as a numpy array.
        """
        fg_color = self.get_fg_color_for_pillow()
        # split `width` into `scale_step` near-equal text-row heights
        text_height_list = tpg.equal_devision(width, scale_step)
        font_size = self.get_color_bar_text_font_size(width / scale_step)
        # code values from 0 to 2**bit_depth; clamp the last entry down to
        # the maximum representable code (2**bit_depth - 1)
        video_level = np.linspace(0, 2**self.bit_depth, scale_step)
        video_level[-1] -= 1
        video_level_float = video_level / self.img_max
        bright_list = tf.eotf_to_luminance(video_level_float,
                                           self.transfer_function)
        if type == 1:
            text_width = int(self.step_bar_text_width * self.img_height)
        elif type == 2:
            text_width = int(self.step_bar_text_width_type2 * self.img_height)
        else:
            raise ValueError('invalid tpg-type')
        txt_img = Image.new("RGB", (text_width, width), (0x00, 0x00, 0x00))
        draw = ImageDraw.Draw(txt_img)
        font = ImageFont.truetype("./fonts/NotoSansMonoCJKjp-Regular.otf",
                                  font_size)
        st_pos_h = 0
        st_pos_v = 0
        for idx in range(scale_step):
            pos = (st_pos_h, st_pos_v)
            # switch number format around 1000 nit so the column width
            # stays visually aligned in the monospace font
            if bright_list[idx] < 999.99999:
                text_data = " {:>4.0f},{:>6.1f} nit".format(
                    video_level[idx], bright_list[idx])
            else:
                text_data = " {:>4.0f},{:>5.0f}  nit".format(
                    video_level[idx], bright_list[idx])
            draw.text(pos, text_data, font=font, fill=fg_color)
            st_pos_v += text_height_list[idx]

        txt_img = self.convert_from_pillow_to_numpy(txt_img)
        # rotate so the vertically stacked text reads horizontally
        txt_img = np.rot90(txt_img)

        return txt_img
Esempio n. 6
0
def gen_eotf_lut_for_ocio(name, sample_num=4096,
                          out_dir_name='./ty_ocio/luts'):
    """
    Generate an EOTF-to-linear 1DLUT for OpenColorIO.
    Any required matrix must be prepared separately!

    The output directory, previously hard-coded, is now a parameter;
    the default preserves the original location.

    Parameters
    ----------
    name : strings
        the name of the gamma curve.
        select from **transfer_functions** module.
    sample_num : int
        sample number.
    out_dir_name : strings
        directory the .spi1d file is written to.

    Returns
    -------
        None.
    """
    x = np.linspace(0, 1, sample_num)
    # decode code values to luminance, normalized to reference white
    y = ty.eotf_to_luminance(x, name) / REF_WHITE_LUMINANCE
    fname = "{}/{}_to_Linear.spi1d".format(
        out_dir_name, name.replace(" ", "_"))
    tylut.save_1dlut_spi_format(lut=y, filename=fname, min=0.0, max=1.0)
Esempio n. 7
0
def gen_pq_to_linear_lut_for_ocio():
    """Generate the ST 2084 (PQ) to linear 10000 nits 1DLUT for OCIO."""
    sample_num = 4096
    code_value = np.linspace(0, 1, sample_num)
    # decode PQ code values to luminance, normalized to reference white
    linear = ty.eotf_to_luminance(code_value, ty.ST2084) / REF_WHITE_LUMINANCE
    out_fname = "./ty_ocio/luts/ST2084_to_Linear_10000nits.spi1d"
    tylut.save_1dlut_spi_format(
        lut=linear, filename=out_fname, min=0.0, max=1.0)
Esempio n. 8
0
 def save_exr_image(self, fname):
     """
     Save the current image to `fname` as a linear-light EXR file.

     The integer image is normalized by its maximum code value, decoded
     through its transfer function to luminance, then divided by 100
     before being handed to TyWriter as float data.
     """
     normalized = self.img / self.img_max
     linear = tf.eotf_to_luminance(normalized, name=self.tf) / 100
     writer = TyWriter(linear, fname, None)
     writer.write(out_img_type_desc=oiio.FLOAT)