Example #1
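All of the snippets below share the same dependencies. A minimal import sketch follows; os, glob, and numpy are standard, while exr (EXR read/write), norm (normalization helpers), and calcRelMSE are project-local utilities whose import paths do not appear in the source, so the commented lines are assumptions:

import os
import glob

import numpy as np

# Project-local helpers (hypothetical import paths):
# import exr                       # provides exr.read_all / exr.write
# import normalization as norm     # signed-log / depth / normal normalizers
# from metrics import calcRelMSE   # relative MSE between two images
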
def get_nfor_img_and_get_relmse(in_pth):
    """
    !! 임시 !!
    기존에 tungsten 랜더러에서 나온 buffer에서 nfor을 가져옴.
    그리고 relMSE를 나오게 뽑음.
    """
    input_spp_list = [32, 100, 256, 512, 1024, 2048, 4096] # [32, 100, 256, 512, 1024]
    out_pth = in_pth + "/nfor"

    if not os.path.exists(out_pth):
        os.mkdir(out_pth)

    f = open(out_pth + '/nfor_relMSE.txt', 'w')

    ref_buffer = exr.read_all(os.path.join(in_pth, "out_64kspp.exr"))
    ref_color = ref_buffer['diffuse'] + ref_buffer['specular']

    for spp in input_spp_list:
        input_name = "out_" + str(spp) + "spp.exr"
        input_buffer = exr.read_all(os.path.join(in_pth, input_name))

        # NFOR-denoised result stored alongside the raw render buffers
        input_nfor = input_buffer['nfor']

        rmse = calcRelMSE(input_nfor, ref_color)

        rmse_str = str(spp) + "spp image relMSE : " + str(rmse)
        f.write(rmse_str + "\n")
        print(rmse_str)

        exr.write(os.path.join(out_pth, str(spp) + "spp_nfor.exr"), input_nfor)

    f.close()
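
A minimal usage sketch for the function above, assuming the imports listed at the top and a hypothetical render directory containing out_<spp>spp.exr files (each with an 'nfor' channel group) plus the out_64kspp.exr reference:

render_dir = "./renders/bathroom"  # hypothetical path

# Writes <render_dir>/nfor/nfor_relMSE.txt and one denoised EXR per spp level.
get_nfor_img_and_get_relmse(render_dir)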
Example #2
def get_all_exr_dataset_one(dataset_dirs, suffix=""):
    """Load the RGB channels of the first EXR file found in each dataset directory."""
    all_data = []
    for dataset_dir in dataset_dirs:
        files = [file for file in os.listdir(dataset_dir) if file.endswith(".exr")]
        # 'suffix' is currently unused; a glob-based filter could replace the line above:
        # files = [fn for fn in glob.glob(os.path.join(dataset_dir, suffix))]

        # Only the first EXR per directory is loaded ("one" image per dataset).
        filename = os.path.join(dataset_dir, files[0])
        data = exr.read_all(filename)
        all_data.append(data['default'][:, :, 0:3])

    return np.array(all_data)
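
A usage sketch, assuming each (hypothetical) directory below contains at least one .exr file with a 'default' RGB channel group readable by the project's exr module:

dataset_dirs = ["./data/sceneA", "./data/sceneB"]  # hypothetical paths

images = get_all_exr_dataset_one(dataset_dirs)
print(images.shape)  # one RGB image per directory, e.g. (2, H, W, 3)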
Example #3
def save_all_exr_dataset(dataset_dirs, scene, target):
    all_data = []
    for dataset_dir in dataset_dirs:
        # glob already returns paths joined with dataset_dir, so no extra join is needed.
        files = glob.glob(os.path.join(dataset_dir, '*.exr'))

        for filename in files:
            data = exr.read_all(filename)
            all_data.append(data['default'][:, :, 0:3])
            # Use only the base file name when building the destination path.
            exr.write(os.path.join('D:/training/', target, scene,
                                   os.path.basename(filename)),
                      data['default'][:, :, 0:3])

    return np.array(all_data)
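
A usage sketch for the export helper above. The source directories are hypothetical, and the hard-coded destination root D:/training/<target>/<scene>/ is assumed to exist already, since exr.write is not assumed to create missing directories:

dataset_dirs = ["D:/renders/bathroom/run0", "D:/renders/bathroom/run1"]  # hypothetical

copied = save_all_exr_dataset(dataset_dirs, scene="bathroom", target="input")
print(copied.shape)  # (num_images, H, W, 3)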
Example #4
def load_normalize_one_exr_for_test(input_pth,
                                    ref_pth,
                                    BUFFER,
                                    load_dtype="HALF",
                                    color_merge=True):
    """Load one noisy/reference EXR pair and assemble normalized network input/target buffers."""

    channels = {
        "diffuse": 3,
        "specular": 3,
        "albedo": 3,
        "depth": 1,
        "normal": 3,
        "diffuseVariance": 1,
        "specularVariance": 1,
        "albedoVariance": 1,
        "depthVariance": 1,
        "normalVariance": 1
    }

    input_all_buffer = exr.read_all(input_pth, precision=load_dtype)
    ref_all_buffer = exr.read_all(ref_pth, precision=load_dtype)

    # test_depth = input_all_buffer['depth']

    "저장 공간 설정 from sample"
    total_ch = 0
    ch_BUFFER = []
    for b in BUFFER:
        # h, w, ch = sample_dict[b].shape()
        sample_data = input_all_buffer[b]
        h, w, ch = sample_data.shape
        total_ch += ch
        ch_BUFFER.append(ch)

    # color
    if color_merge:
        input_buffer = np.zeros((1, h, w, total_ch - 3),
                                dtype=sample_data.dtype)
        ref_buffer = np.zeros((1, h, w, 3), dtype=sample_data.dtype)

        input_buffer[0, :, :, :3] = norm.normalization_signed_log(
            input_all_buffer["diffuse"] + input_all_buffer["specular"])
        ref_buffer[0, :, :, :3] = norm.normalization_signed_log(
            ref_all_buffer["diffuse"] + ref_all_buffer["specular"])
        start_ch = 3
    else:
        input_buffer = np.zeros((1, h, w, total_ch), dtype=sample_data.dtype)
        ref_buffer = np.zeros((1, h, w, 6), dtype=sample_data.dtype)

        input_buffer[0, :, :, :3] = norm.normalization_signed_log(
            input_all_buffer["diffuse"])
        input_buffer[0, :, :, 3:6] = norm.normalization_signed_log(
            input_all_buffer["specular"])
        ref_buffer[0, :, :, :3] = norm.normalization_signed_log(
            ref_all_buffer["diffuse"])
        ref_buffer[0, :, :, 3:6] = norm.normalization_signed_log(
            ref_all_buffer["specular"])
        start_ch = 6

    # g-buffer features (each buffer type gets its own normalization)
    for name in BUFFER:
        if name == 'depth':
            input_buffer[:, :, :, start_ch:start_ch + channels[name]] = \
                norm.normalize_depth_1ch_v1(input_all_buffer[name])
            start_ch += channels[name]
        elif name == 'normal':
            input_buffer[:, :, :, start_ch:start_ch + channels[name]] = \
                norm.normalize_normal(input_all_buffer[name])
            start_ch += channels[name]
        elif name == 'albedo':
            input_buffer[:, :, :, start_ch:start_ch + channels[name]] = \
                input_all_buffer[name]
            start_ch += channels[name]
    return input_buffer, ref_buffer
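
A usage sketch, assuming a noisy/reference EXR pair written by the Tungsten pipeline with the channel groups listed in the channels dict above; the file names are hypothetical:

input_pth = "./scene/out_00128spp.exr"  # hypothetical paths
ref_pth = "./scene/out_08192spp.exr"

BUFFER = ["diffuse", "specular", "albedo", "depth", "normal"]

input_buffer, ref_buffer = load_normalize_one_exr_for_test(
    input_pth, ref_pth, BUFFER, load_dtype="HALF", color_merge=True)

print(input_buffer.shape)  # (1, H, W, 10): merged color (3) + albedo (3) + depth (1) + normal (3)
print(ref_buffer.shape)    # (1, H, W, 3)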
Example #5
def load_exrs_from_tungsten(DIR,
                            SCENE,
                            BUFFER,
                            mini_batch=True,
                            flag_saving=False,
                            saving_pth="tmp",
                            input_endswith="00128spp.exr",
                            ref_endswith="08192spp.exr",
                            load_dtype="HALF"):
    """
    input : Dir = scene 폴더들이 있는 상위 폴더 위치, SCENE = 가져올 scene, BUFFER = 특정 feature 버퍼 이름
    output : input buffer, ref buffer
    특징 1 : original tungsten DB에서 원하는 input, ref 버퍼 만들기
    특징 2 : 하나하나 읽어와 저장하는 구조라서 시간이 많이 걸림.
    특징 3 : input = 124 spp, target = 8k spp 고정
    특징 4 : 데이터의 양이 너무 많아서 굉장히 불러오는데 오래걸림.

    """

    # DIR = 'D:/Tunsten_deep_learning_denoising_dataset/deep_learning_denoising/renderings'
    # SCENE = ['bathroom2', 'car2', 'classroom', 'house', 'room2', 'room3', 'spaceship', 'staircase']
    # BUFFER = ['diffuse', 'specular', 'albedo', 'depth', 'normal']

    "path 불러오기"
    ALL_SCENES = []  # 모든 spp, feature에 해당하는 buffer 이름
    input_all_features = []  # [scene, files]
    ref_all_feature = []

    total_num_imgs = 0

    for i in range(len(SCENE)):
        files = os.listdir(os.path.join(DIR, SCENE[i]))

        input_all_features_one_scene = [
            file for file in files if file.endswith(input_endswith)
        ]
        ref_all_features_one_scene = [
            file for file in files if file.endswith(ref_endswith)
        ]

        if not mini_batch:
            input_all_features.append(input_all_features_one_scene)
            ref_all_feature.append(ref_all_features_one_scene)
            total_num_imgs += len(input_all_features_one_scene)
        else:
            input_all_features.append(input_all_features_one_scene[:2])
            ref_all_feature.append(ref_all_features_one_scene[:2])
            total_num_imgs += 2

    "저장 공간 설정 from sample"
    sample_dict = exr.read_all(os.path.join(DIR, SCENE[0],
                                            input_all_features[0][0]),
                               precision=load_dtype)
    total_ch = 0
    ch_BUFFER = []
    for b in BUFFER:
        # h, w, ch = sample_dict[b].shape()
        sample_data = sample_dict[b]
        h, w, ch = sample_data.shape
        total_ch += ch
        ch_BUFFER.append(ch)

    input_buffer = np.ones((total_num_imgs, h, w, total_ch),
                           dtype=sample_data.dtype)
    ref_buffer = np.ones((total_num_imgs, h, w, 6), dtype=sample_data.dtype)

    "exr load"
    img_index = 0
    for s in range(len(input_all_features)):
        for f in range(len(input_all_features[s])):
            print(f)

            "input : network input"
            input_file_name = input_all_features[s][f]
            one_input = exr.read_all(os.path.join(DIR, SCENE[s],
                                                  input_file_name),
                                     precision=load_dtype)
            start_ch = 0
            for b in range(len(BUFFER)):
                input_buffer[img_index, :, :, start_ch:start_ch +
                             ch_BUFFER[b]] = one_input[BUFFER[b]]
                start_ch += ch_BUFFER[b]

            "ref : 오직 color 부분만"
            ref_file_name = ref_all_feature[s][f]
            one_ref = exr.read_all(os.path.join(DIR, SCENE[s], ref_file_name),
                                   precision=load_dtype)
            ref_buffer[img_index, :, :, :3] = one_ref['diffuse']
            ref_buffer[img_index, :, :, 3:] = one_ref['specular']

            img_index += 1

    if flag_saving:
        # input_saving_name = "input_buffer"
        # ref_saving_name = "ref_buffer"
        #
        # extension_name = ".npy"
        # np.save(saving_pth + input_saving_name + extension_name, input_buffer)
        # np.save(saving_pth + ref_saving_name + extension_name, ref_buffer)

        start_ch = 0
        extension_name = ".npy"
        for b in range(len(BUFFER)):
            input_saving_name = "input_" + BUFFER[b]
            np.save(saving_pth + input_saving_name + extension_name,
                    input_buffer[:, :, :, start_ch:start_ch + ch_BUFFER[b]])
            start_ch += ch_BUFFER[b]

        np.save(saving_pth + "ref_diffuse" + extension_name,
                ref_buffer[:, :, :, :3])
        np.save(saving_pth + "ref_specular" + extension_name,
                ref_buffer[:, :, :, 3:6])

        # extension_name = ".h5"
        # hf = h5py.File(saving_pth + ref_saving_name + extension_name, 'w')
        # hf.create_dataset('input', data=input_buffer)
        # hf.create_dataset('ref', data=ref_buffer)
        # hf.close()

    return input_buffer, ref_buffer
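
A usage sketch for the loader above, reusing the example paths from the commented-out lines in the function body; the scene subset is an arbitrary choice, and mini_batch=True limits loading to the first two frames per scene:

DIR = 'D:/Tunsten_deep_learning_denoising_dataset/deep_learning_denoising/renderings'
SCENE = ['bathroom2', 'car2']
BUFFER = ['diffuse', 'specular', 'albedo', 'depth', 'normal']

input_buffer, ref_buffer = load_exrs_from_tungsten(
    DIR, SCENE, BUFFER,
    mini_batch=True,    # only the first 2 frames per scene
    flag_saving=False)

print(input_buffer.shape)  # (4, H, W, 13): 2 scenes x 2 frames, all feature channels
print(ref_buffer.shape)    # (4, H, W, 6): reference diffuse + specular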