Example #1
def get_nfor_img_and_get_relmse(in_pth):
    """
    !! 임시 !!
    기존에 tungsten 랜더러에서 나온 buffer에서 nfor을 가져옴.
    그리고 relMSE를 나오게 뽑음.
    """
    input_spp_list = [32, 100, 256, 512, 1024, 2048, 4096] # [32, 100, 256, 512, 1024]
    out_pth = in_pth + "/nfor"

    if not os.path.exists(out_pth):
        os.mkdir(out_pth)

    f = open(out_pth + '/nfor_relMSE.txt', 'w')

    ref_buffer = exr.read_all(os.path.join(in_pth, "out_64kspp.exr"))
    ref_color = ref_buffer['diffuse'] + ref_buffer['specular']

    for i in range(len(input_spp_list)):
        input_name = "out_" + str(input_spp_list[i]) + "spp.exr"

        input_buffer = exr.read_all(os.path.join(in_pth, input_name))

        input_nfor = input_buffer['nfor']

        rmse = calcRelMSE(input_nfor, ref_color)

        rmse_str = str(input_spp_list[i]) + "spp image relMSE : " + str(rmse)
        f.write(rmse_str)
        f.write("\n")
        print(rmse_str)

        exr.write(out_pth + "/" + str(input_spp_list[i]) + "spp_nfor.exr", input_nfor)

    f.close()
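
# calcRelMSE is defined elsewhere in this codebase. For reference, a minimal
# sketch of a relative-MSE metric consistent with how it is called above
# (prediction first, reference second); the name and epsilon are illustrative,
# not the project's actual implementation:
import numpy as np

def relmse_sketch(pred, ref, eps=1e-2):
    # squared error relative to the squared reference, averaged over all pixels/channels
    return np.mean((pred - ref) ** 2 / (ref ** 2 + eps))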
Example #2
def get_bmp_dataset_pair(dataset_dir):
    # NOTE: scipy.misc.imread/imresize are deprecated and removed in newer
    # SciPy releases; this function targets the old scipy.misc API.
    from scipy import misc

    resized_dataset = []
    dataset = []

    scale = 2
    files = os.listdir(dataset_dir)

    for file in files:
        filename = os.path.join(dataset_dir, file)
        data = misc.imread(filename)

        # downscale by 1/scale, then upscale back to the original size
        # (an integer size argument is interpreted by imresize as a percentage)
        resized_data = misc.imresize(data, size=1.0 / scale, interp='bicubic')
        resized_data = misc.imresize(resized_data, size=scale * 100, interp='bicubic')

        dataset.append(data[:, :, 0:3])
        resized_dataset.append(resized_data[:, :, 0:3])

        exr.write('debug-bmp/' + file + '.exr', data[:, :, 0:3] / 255.0)
        exr.write('debug-bmp-resized/' + file + '.exr', resized_data[:, :, 0:3] / 255.0)

    resized_dataset = np.array(resized_dataset)
    dataset = np.array(dataset)

    return resized_dataset, dataset
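
# scipy.misc.imread/imresize no longer exist in current SciPy releases. A rough
# equivalent of the downscale/upscale pair above using Pillow, given only as a
# sketch (the helper name is illustrative; this project itself relies on the
# old scipy.misc API):
import numpy as np
from PIL import Image

def make_lr_hr_pair_sketch(filename, scale=2):
    img = Image.open(filename).convert('RGB')
    w, h = img.size
    # downscale by 1/scale, then upscale back to the original resolution
    lr = img.resize((w // scale, h // scale), Image.BICUBIC)  # Image.Resampling.BICUBIC on newer Pillow
    lr = lr.resize((w, h), Image.BICUBIC)
    return np.array(lr), np.array(img)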
Example #3
def save_all_exr_dataset(dataset_dirs, scene, target):
    all_data = []
    for dataset_dir in dataset_dirs:
        # glob already returns paths prefixed with dataset_dir
        files = glob.glob(os.path.join(dataset_dir, '*.exr'))

        for f in files:
            data = exr.read_all(f)
            all_data.append(data['default'][:, :, 0:3])
            exr.write(os.path.join('D:/training/', target, scene, os.path.basename(f)),
                      data['default'][:, :, 0:3])

    return np.array(all_data)
Example #4
def train_test_cmp_model_img_v1(train_input_img_buffer, train_ref_img_buffer,
                                test_input_img_buffer, test_ref_img_buffer,
                                params):
    """
    입력 구성: NGPT에 쓰이는 TRAIN AND TEST 버퍼.
    특징 #1 : 기존의 함수의 형태는 유지를 하고 NGPT에 맞게 FULL IMG로 학습이 진행이 됨.
    """
    """INITIAL SETTING"""
    # GPU index setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_train, H, W, ch_in = train_input_img_buffer.shape
    N_test = test_input_img_buffer.shape[0]
    """NORMALIZATION AND BUFFER SELECTION"""
    norm.normalize_input_img_cmp(train_input_img_buffer, train_ref_img_buffer)
    norm.normalize_input_img_cmp(test_input_img_buffer, test_ref_img_buffer)
    """SETTING DATA LOAD AND CORRESPONDING TRANSFORMS"""
    # define transform op
    transform_patch = transforms.Compose([
        FT.RandomCrop(params['patch_size']),
        # FT.RandomFlip_with_design(multi_crop=False),  # currently broken
        FT.ToTensor(multi_crop=False)
    ])
    transform_img = transforms.Compose([FT.ToTensor(multi_crop=False)
                                        ])  # targeting for image

    # train data loader
    train_data = dataset.Supervised_dataset(train_input_img_buffer,
                                            train_ref_img_buffer,
                                            train=True,
                                            transform=transform_patch)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=params['batch_size'],
                                               shuffle=True)

    # test data loader
    test_data = dataset.Supervised_dataset(test_input_img_buffer,
                                           test_ref_img_buffer,
                                           train=False,
                                           transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=1,
                                              shuffle=False)
    """NETWORK INITIALIZATION"""
    mynet = NGPT.Back_bone_NGPT_v1(params, channels_in=ch_in,
                                   out_dim=3).train().to(device)
    """SAVING THE TENSORBOARD"""
    out_tensorboard_folder_name = params["time_saving_folder"] + "/tensorboards"
    if not os.path.exists(out_tensorboard_folder_name):
        os.mkdir(out_tensorboard_folder_name)
    writer = tensorboard.SummaryWriter(out_tensorboard_folder_name)
    """SET LOSS AND OPTIMIZATION"""
    optimizer = optim.Adam(mynet.parameters(), lr=params['lr'])
    """TRAIN NETWORK"""
    epochs = params["epochs"]

    with tqdm(range(0, epochs), leave=True) as tnr:
        tnr.set_postfix(epoch=0, loss=-1.)

        for epoch in tnr:

            one_epoch_loss = 0.0
            num_iter_for_one_epoch = 0

            for data in train_loader:
                optimizer.zero_grad()

                x = data['input'].cuda()
                y = data['target'].cuda()

                y_pred = mynet(x)
                current_loss = mynet.loss(y_pred, y)
                current_loss.backward()
                optimizer.step()

                # show the current loss after each batch
                tnr.set_postfix(epoch=epoch, loss=current_loss.item())

                one_epoch_loss += current_loss.data.item()
                num_iter_for_one_epoch += 1

            one_epoch_loss /= num_iter_for_one_epoch
            writer.add_scalar('training loss', one_epoch_loss, epoch)

            "PARAMETER SAVING"
            if (epoch + 1) % params['para_saving_epoch'] == 0:
                out_para_folder_name = params[
                    "time_saving_folder"] + "/parameters"
                if not os.path.exists(out_para_folder_name):
                    os.mkdir(out_para_folder_name)
                torch.save(mynet.state_dict(),
                           out_para_folder_name + "/latest_parameter")

            "INTERMEDIATE RESULTING PATCH SAVING"
            if (epoch + 1) % params["val_patches_saving_epoch"] == 0:
                inter_patch_folder_name = params[
                    "time_saving_folder"] + "/val_patches"
                if not os.path.exists(inter_patch_folder_name):
                    os.mkdir(inter_patch_folder_name)

                x_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    x)
                y_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    y)
                y_pred_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    y_pred)

                for l in range(x_np_saving.shape[0]):
                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_in.exr", x_np_saving[l, :, :,
                                                                    0:3])

                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_out.exr",
                        y_pred_np_saving[l, :, :, 0:3])

                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_ref.exr", y_np_saving[l, :, :,
                                                                     0:3])
    """VALIDATE NETWORK"""
    with torch.no_grad():
        mynet.eval()
        out_folder_name = params["time_saving_folder"] + "/test_imgs"
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        rmse_saving_pth = out_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        for data in test_loader:
            x = data['input'].cuda()
            y = data['target'].cuda()

            y_pred = mynet(x)

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                x[:, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y)
            y_pred_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(
                y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_input.exr", x_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_gt.exr", y_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_result.exr", y_pred_np_saving)

            image_index += 1

        f.close()
    writer.close()
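
# The params dict consumed by train_test_cmp_model_img_v1 is built elsewhere.
# A sketch of the keys the function reads above; the values are illustrative
# placeholders, not the project's actual configuration:
params_sketch = {
    'patch_size': 128,                # RandomCrop patch size
    'batch_size': 8,
    'lr': 1e-4,
    'epochs': 100,
    'para_saving_epoch': 10,          # save parameters every N epochs
    'val_patches_saving_epoch': 10,   # dump intermediate patches every N epochs
    'time_saving_folder': './results/example_run',
    'saving_file_name': 'ngpt_test',
}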
Example #5
def test_model_stack_v1(test_input_stack, test_design_stack, test_GT_stack, params):
    """
    입력 구성: 오직 TEST 버퍼.
    순서: normalization -> making gird by order -> design matrix -> data loader -> network setting -> train -> test
    특징: param에 있는 trained model path에 따라 얻어진 모델 테스트
    """

    """INITIAL SETTING"""
    # GPU index setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_test, H_d, W_d, tile_size, _ = test_input_stack.shape
    H = H_d * params["tile_length"]
    W = W_d * params["tile_length"]

    """SETTING DATA LOAD AND CORRESPONDING TRANSFORMS"""
    # define transform op
    transform_img = transforms.Compose([FT.ToTensor_stack_with_design(multi_crop=False)])  # targeting for image

    # test data loader
    test_data = dataset.Supervised_dataset_with_design_v1(test_input_stack, test_design_stack, test_GT_stack,
                                                          train=False, transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)

    """NETWORK INITIALIZATION"""
    mynet = models_v1.NPR_net_stack_v1(ch_in=10, kernel_size=3, tile_length=4, n_layers=12, length_inter_tile=5,
                                       epsilon=0.01, pad_mode=1, no_stit_input=params['no_boundary_for_input'],
                                       no_stit_design=params['no_boundary_for_design']).train().to(device)

    """LOAD THE TRAINED PARAMETER"""
    parameter_pth = "./results/parameters/" + params['trained_parameter_pth'] + "/" + params['trained_parameter_name']
    mynet.load_state_dict(torch.load(parameter_pth))

    """TEST NETWORK"""
    with torch.no_grad():
        mynet.eval()

        out_folder_name = "./results/imgs/" + params['saving_folder_name']
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        time_folder_name = out_folder_name + "/" + str(datetime.today().strftime("%Y_%m_%d_%H_%M"))
        if not os.path.exists(time_folder_name):
            os.mkdir(time_folder_name)

        rmse_saving_pth = time_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        for data in test_loader:
            x = data['input'].cuda()
            d = data['design'].cuda()
            y = data['target'].cuda()

            y_pred = mynet(x, d)

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(x[:, :, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y)
            y_pred_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_input.exr",
                      x_np_saving)
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_gt.exr",
                      y_np_saving)
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_result.exr",
                      y_pred_np_saving)

            image_index += 1

        f.close()
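
# norm.denormalization_signed_log is used above, but the norm module is not
# shown in this listing. A common signed-log pair for HDR radiance, given as
# an assumption of what it likely inverts (not the project's actual code):
import numpy as np

def normalization_signed_log_sketch(x):
    # symmetric log compression of HDR values
    return np.sign(x) * np.log1p(np.abs(x))

def denormalization_signed_log_sketch(x):
    # inverse of the signed-log compression above
    return np.sign(x) * np.expm1(np.abs(x))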
Example #6
def train_test_model_stack_v1(train_input_stack, train_design_stack, train_GT_stack,
                              test_input_stack, test_design_stack, test_GT_stack, params):
    """
    입력 구성: path reusing recon에 쓰이는 TRAIN AND TEST 버퍼.
    순서: normalization -> making gird by order -> design matrix -> data loader -> network setting -> train -> test
    특징 #1 : 최대한 간단하면서 지적된 문제점을 보안함
    특징 #2 : 일단 무조건 바운더리를 포함함.
    """

    """INITIAL SETTING"""
    # GPU index setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_train, H_d, W_d, tile_size, ch_in = train_input_stack.shape
    N_test = test_input_stack.shape[0]
    H = H_d * params["tile_length"]
    W = W_d * params["tile_length"]

    """SETTING DATA LOAD AND CORRESPONDING TRANSFORMS"""
    # define transform op
    transform_patch = transforms.Compose([
        FT.RandomCrop_stack_with_design(params['patch_size'], params['tile_length']),
        # FT.RandomFlip_with_design(multi_crop=False),  # currently broken
        FT.ToTensor_stack_with_design(multi_crop=False)
    ])
    transform_img = transforms.Compose([FT.ToTensor_stack_with_design(multi_crop=False)])  # targeting for image

    # train data loader
    train_data = dataset.Supervised_dataset_with_design_v1(train_input_stack, train_design_stack, train_GT_stack,
                                                           train=True, transform=transform_patch)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=params['batch_size'], shuffle=True)

    # test data loader
    test_data = dataset.Supervised_dataset_with_design_v1(test_input_stack, test_design_stack, test_GT_stack,
                                                          train=False, transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)

    """NETWORK INITIALIZATION"""
    # mynet = models_v1.NPR_net_stack_v1(ch_in=10, kernel_size=3, tile_length=4, n_layers=12, length_inter_tile=7,
    #                                    epsilon=1, pad_mode=0, no_stit_input=params['no_boundary_for_input'],
    #                                    no_stit_design=params['no_boundary_for_design']).train().to(device)

    # mynet = models_v1.NPR_net_img_v1(ch_in=10, kernel_size=3, tile_length=4, n_layers=20, length_inter_tile=7,
    #                                  epsilon=0.01, pad_mode=0, no_stit_design=params['no_boundary_for_design']).train().to(device)

    mynet = models_v1.NPR_net_stack_v2(params, ch_in=10, kernel_size=3, tile_length=4, n_layers=16,
                                       length_inter_tile=7, epsilon=0.01, pad_mode=1, unfolded_loss=False,
                                       norm_in_window=False, W_half=True, only_diag=False, is_resnet=False).train().to(
        device)

    # mynet = models_v1.NPR_net_stack_chain_reg(params, ch_in=10, kernel_size=3, tile_length=4, n_layers=12,
    # length_inter_tile=3, num_reg=7, epsilon=0.01, pad_mode=0, unfolded_loss=True, norm_in_window=False, W_half=True, is_resnet=False).train().to(device)

    """SAVING THE TENSORBOARD"""
    out_tensorboard_folder_name = params["time_saving_folder"] + "/tensorboards"
    if not os.path.exists(out_tensorboard_folder_name):
        os.mkdir(out_tensorboard_folder_name)
    writer = tensorboard.SummaryWriter(out_tensorboard_folder_name)

    """SET LOSS AND OPTIMIZATION"""
    loss_fn = my_loss.loss_for_stit_v1(params['tile_length'], params["stitching_weights"], params['loss_type'])

    optimizer = optim.Adam(mynet.parameters(), lr=params['lr'])

    """TRAIN NETWORK"""
    epochs = params["epochs"]

    with tqdm(range(0, epochs), leave=True) as tnr:
        tnr.set_postfix(epoch=0, loss=-1.)

        for epoch in tnr:

            one_epoch_loss = 0.0
            num_iter_for_one_epoch = 0

            for data in train_loader:
                optimizer.zero_grad()

                x = data['input'].cuda()
                d = data['design'].cuda()
                y = data['target'].cuda()

                # d_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(d)
                # d_np_saving = d_np_saving[0]
                # exr.write("./train_design_albedo.exr", d_np_saving[:, :, 1:4])
                # exr.write("./train_design_depth.exr", d_np_saving[:, :, 4])
                # exr.write("./train_design_normal.exr", d_np_saving[:, :, 5:8])
                # exr.write("./train_design_xx.exr", d_np_saving[:, :, 8])
                #
                # x_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(x)
                # x_np_saving = x_np_saving[0]
                # exr.write("./train_input_color.exr", x_np_saving[:, :, 0:3])
                #
                # y_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y)
                # y_np_saving = y_np_saving[0]
                # exr.write("./train_ref_color.exr", y_np_saving[:, :, 0:3])

                # for v1
                # y_pred = mynet(x, d)
                # current_loss = loss_fn(y_pred, y)
                # current_loss.backward()
                # optimizer.step()

                # for v2

                if (epoch + 1) % params["val_patches_saving_epoch"] == 0:
                    saving_flag = True
                else:
                    saving_flag = False

                y_pred, current_loss = mynet(x, d, y, saving_flag)
                current_loss.backward()
                optimizer.step()

                # y_pred_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y_pred)
                # y_pred_np_saving = y_pred_np_saving[0]
                # exr.write("./train_out_color.exr", y_pred_np_saving[:, :, 0:3])

                # show the current loss after each batch
                tnr.set_postfix(epoch=epoch, loss=current_loss.item())

                one_epoch_loss += current_loss.data.item()
                num_iter_for_one_epoch += 1

            one_epoch_loss /= num_iter_for_one_epoch
            writer.add_scalar('training loss', one_epoch_loss, epoch)

            "PARAMETER SAVING"
            if (epoch + 1) % params['para_saving_epoch'] == 0:
                out_para_folder_name = params["time_saving_folder"] + "/parameters"
                if not os.path.exists(out_para_folder_name):
                    os.mkdir(out_para_folder_name)
                torch.save(mynet.state_dict(), out_para_folder_name + "/latest_parameter")

            "INTERMEDIATE RESULTING PATCH SAVING"
            if (epoch + 1) % params["val_patches_saving_epoch"] == 0:
                inter_patch_folder_name = params["time_saving_folder"] + "/val_patches"
                if not os.path.exists(inter_patch_folder_name):
                    os.mkdir(inter_patch_folder_name)

                x_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(x)
                d_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(d)
                y_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y)
                y_pred_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y_pred)

                for l in range(x_np_saving.shape[0]):
                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_color_in.exr",
                              x_np_saving[l, :, :, 0:3])

                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_g_albedo.exr",
                              d_np_saving[l, :, :, 1:4])
                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_g_depth.exr",
                              d_np_saving[l, :, :, 4])
                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_g_normal.exr",
                              d_np_saving[l, :, :, 5:8])

                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_color_out.exr",
                              y_pred_np_saving[l, :, :, 0:3])

                    exr.write(inter_patch_folder_name + "/epoch_" + str(epoch) + "_" + str(l) + "_color_ref.exr",
                              y_np_saving[l, :, :, 0:3])

    """VALIDATE NETWORK"""
    with torch.no_grad():
        mynet.eval()
        out_folder_name = params["time_saving_folder"] + "/test_imgs"
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        rmse_saving_pth = out_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        for data in test_loader:
            x = data['input'].cuda()
            d = data['design'].cuda()
            y = data['target'].cuda()

            # for v1
            # y_pred = mynet(x, d)

            # for v2
            y_pred, current_loss = mynet(x, d, y, True)

            # d_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(d)
            # d_np_saving = d_np_saving[0]
            # exr.write("./test_design_albedo.exr", d_np_saving[:, :, 1:4])
            # exr.write("./test_design_depth.exr", d_np_saving[:, :, 4])
            # exr.write("./test_design_normal.exr", d_np_saving[:, :, 5:8])

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(x[:, :, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y)
            y_pred_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(out_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_input.exr",
                      x_np_saving)
            exr.write(out_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_gt.exr",
                      y_np_saving)
            exr.write(out_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_result.exr",
                      y_pred_np_saving)

            image_index += 1

        f.close()
    writer.close()
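
# other_tools.from_torch_tensor_stack_to_full_res_numpy is used above but not
# shown. A sketch of the tile-to-image rearrangement it presumably performs,
# assuming the numpy stack layout (N, H_d, W_d, tile_length**2, ch) used at
# the top of this function and row-major pixels inside each tile:
import numpy as np

def stack_to_full_res_sketch(stack, tile_length):
    N, H_d, W_d, tile_size, ch = stack.shape
    assert tile_size == tile_length ** 2
    tiles = stack.reshape(N, H_d, W_d, tile_length, tile_length, ch)
    # interleave tile rows and columns into a full-resolution image
    img = tiles.transpose(0, 1, 3, 2, 4, 5)
    return img.reshape(N, H_d * tile_length, W_d * tile_length, ch)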
Example #7
def train_test_model_img_v1(train_input_img, train_GT_img, test_input_img, test_GT_img, params):
    """
    입력 구성: path reusing recon에 쓰이는 TRAIN AND TEST 버퍼.
    순서: normalization -> making gird by order -> design matrix -> data loader -> network setting -> train -> test
    특징: 함수의 이름에서 알 수 있듯이 stack으로 받는 것이 아닌 img형태로 데이터를 받음
    """

    """INITIAL SETTING"""
    # GPU index setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    N_train, H, W, _ = train_input_img.shape
    N_test = test_input_img.shape[0]
    H_d = int(H // params["tile_length"])
    W_d = int(W // params["tile_length"])

    """NORMALIZATION FOR STACK BUFFER"""
    norm.normalize_input_img_v1(train_input_img)
    norm.normalize_input_img_v1(test_input_img)

    norm.normalize_GT_v1(train_GT_img)
    norm.normalize_GT_v1(test_GT_img)

    """MAKING THE DESIGN MATRIX"""
    train_design_stack = design.generate_design_mat_from_img_v1(train_input_img[:, :, :, 3:],
                                                                params['tile_length'], params['grid_order'], False)
    test_design_stack = design.generate_design_mat_from_img_v1(test_input_img[:, :, :, 3:],
                                                               params['tile_length'], params['grid_order'], False)

    """SETTING DATA LOAD AND CORRESPONDING TRANSFORMS"""
    # define transform op
    transform_patch = transforms.Compose([
        FT.RandomCrop_img_stack_with_design(params['patch_size'], params['tile_length']),
        FT.RandomFlip_with_design(multi_crop=False),
        FT.ToTensor_img_stack_with_design(multi_crop=False)
    ])
    transform_img = transforms.Compose([FT.ToTensor_img_stack_with_design(multi_crop=False)])  # targeting for image

    # train data loader
    train_data = dataset.Supervised_dataset_with_design_v1(train_input_img, train_design_stack, train_GT_img,
                                                           train=True, transform=transform_patch)
    train_loader = torch.utils.data.DataLoader(train_data, batch_size=params['batch_size'], shuffle=True)

    # test data loader
    test_data = dataset.Supervised_dataset_with_design_v1(test_input_img, test_design_stack, test_GT_img,
                                                          train=False, transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)

    """NETWORK INITIALIZATION"""
    mynet = models_v1.NPR_net_img_v1(channels_in=10, kernel_size=3, tile_length=4, n_layers=20, length_inter_tile=5,
                                     epsilon=0.01, pad_mode=1).train().to(device)

    """SAVING THE TENSORBOARD"""
    out_tensorboard_folder_name = "./results/tensorboards/" + params['saving_folder_name']
    if not os.path.exists(out_tensorboard_folder_name):
        os.mkdir(out_tensorboard_folder_name)
    writer = tensorboard.SummaryWriter(out_tensorboard_folder_name + "/" + params['saving_file_name'] + "_" +
                                       str(datetime.today().strftime("%Y_%m_%d_%H_%M")))

    """SET LOSS AND OPTIMIZATION"""
    loss_fn = my_loss.loss_for_stit_v1(params['tile_length'], params["stitching_weights"], params['loss_type'])
    optimizer = optim.Adam(mynet.parameters(), lr=params['lr'])

    """TRAIN NETWORK"""
    epochs = params["epochs"]

    with tqdm(range(0, epochs), leave=True) as tnr:
        tnr.set_postfix(epoch=0, loss=-1.)

        for epoch in tnr:

            one_epoch_loss = 0.0
            num_iter_for_one_epoch = 0

            for data in train_loader:
                optimizer.zero_grad()

                x = data['input'].cuda()
                d = data['design'].cuda()
                y = data['target'].cuda()

                y_pred = mynet(x, d)

                current_loss = loss_fn(y_pred, y)
                current_loss.backward()
                optimizer.step()

                # show the current loss after each batch
                tnr.set_postfix(epoch=epoch, loss=current_loss.item())

                one_epoch_loss += current_loss.data.item()
                num_iter_for_one_epoch += 1

            one_epoch_loss /= num_iter_for_one_epoch
            writer.add_scalar('training loss', one_epoch_loss, epoch)

            "PARAMETER SAVING"
            if (epoch + 1) % params['para_saving_epoch'] == 0:
                out_para_folder_name = "./results/parameters/" + params['saving_folder_name']
                if not os.path.exists(out_para_folder_name):
                    os.mkdir(out_para_folder_name)
                torch.save(mynet.state_dict(), out_para_folder_name + "/latest_parameter")

    """VALIDATE NETWORK"""
    with torch.no_grad():
        mynet.eval()

        out_folder_name = "./results/imgs/" + params['saving_folder_name']
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        time_folder_name = out_folder_name + "/" + str(datetime.today().strftime("%Y_%m_%d_%H_%M"))
        if not os.path.exists(time_folder_name):
            os.mkdir(time_folder_name)

        rmse_saving_pth = time_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        for data in test_loader:
            x = data['input'].cuda()
            d = data['design'].cuda()
            y = data['target'].cuda()

            y_pred = mynet(x, d)

            # d_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(d)
            # d_np_saving = d_np_saving[0]
            # exr.write("./test_design_albedo.exr", d_np_saving[:, :, 1:4])
            # exr.write("./test_design_depth.exr", d_np_saving[:, :, 4])
            # exr.write("./test_design_normal.exr", d_np_saving[:, :, 5:8])

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(x[:, :, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y)
            y_pred_np_saving = other_tools.from_torch_tensor_stack_to_full_res_numpy(y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_input.exr",
                      x_np_saving)
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_gt.exr",
                      y_np_saving)
            exr.write(time_folder_name + "/" + params['saving_file_name'] + "_" + str(image_index) + "_result.exr",
                      y_pred_np_saving)

            image_index += 1

        f.close()
    writer.close()
Example #8
def test_for_one_exr(params, input_buffer, ref_buffer):
    """
    임시로 test 함수를 만듦.
    나중에는 ttv에 이런 기능을 넣어 통합할 예정.
    이름대로 하나의 exr buffer를 갖고 test를 하는 함수임.

    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # device = torch.device("cpu")
    """SETTING DATA LOAD FOR TEST"""
    transform_img = transforms.Compose([FT.ToTensor(multi_crop=False)
                                        ])  # targeting for image

    # test data loader
    test_data = dataset.Supervised_dataset(input_buffer,
                                           ref_buffer,
                                           train=False,
                                           transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=1,
                                              shuffle=False)
    """LOSS SETTING"""
    if params['loss_type'] == 'l2':
        loss_fn = torch.nn.MSELoss()
    elif params['loss_type'] == 'l1':
        loss_fn = torch.nn.L1Loss()
    elif params['loss_type'] == 'custom_loss_v1':
        loss_fn = my_loss.my_custom_loss_v1(params['loss_weights'],
                                            params['SMAP_loss'],
                                            params['reinhard_loss'])
    else:
        print("unknown loss!")
        return

    if params['network_name'] == "WLS_net_v1":
        mynet = models_v1.WLS_net_v1(params,
                                     loss_fn,
                                     ch_in=10,
                                     kernel_size=3,
                                     n_layers=50,
                                     length_p_kernel=21,
                                     epsilon=0.001,
                                     pad_mode=0,
                                     loss_type=0,
                                     kernel_accum=True,
                                     norm_in_window=False,
                                     is_resnet=True).train().to(device)

    elif params['network_name'] == "KPCN_v1":
        mynet = models_KPCN.KPCN_v1(params,
                                    loss_fn,
                                    ch_in=10,
                                    kernel_size=3,
                                    n_layers=50,
                                    length_p_kernel=21,
                                    no_soft_max=False,
                                    pad_mode=0,
                                    is_resnet=True).train().to(device)

    parameter_pth = params[
        'trained_model_folder_pth'] + "/parameters/" + params[
            'trained_parameter_name']
    saved_torch_para = torch.load(parameter_pth)

    # mynet.load_state_dict(saved_torch_para['model_state_dict'])
    mynet = saved_torch_para['model']
    """VALIDATE NETWORK"""
    with torch.no_grad():
        mynet.eval()

        out_folder_name = params["saving_sub_folder_name"] + "/test_imgs"
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        rmse_saving_pth = out_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        f.write(str(params["saving_folder_name"]))
        f.write("\n")

        for data in test_loader:
            x = data['input'].to(device).to(torch.float32)
            y = data['target'].to(device).to(torch.float32)

            if params['network_name'] == "WLS_net_v1":
                # y_pred = mynet(x, y, True)
                y_pred = mynet.test_chunkwise(x, chunk_size=200)

            elif params['network_name'] == "KPCN_v1":
                y_pred = mynet(x, y, only_img_out=True)
                # y_pred = mynet.test_chunkwise(x, chunk_size=200)

            elif params['network_name'] == "WLS_net_FG_v1":
                # y_pred = mynet(x, y, True)
                y_pred = mynet.test_chunkwise(x, chunk_size=200)

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                x[:, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y)
            y_pred_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(
                y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_input.exr", x_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_gt.exr", y_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_result.exr", y_pred_np_saving)

            image_index += 1
        f.close()
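
# mynet.test_chunkwise is defined in the model code and not shown here. A
# rough sketch of the idea (memory-bounded inference over row chunks of the
# input, with no overlap handling, which the real implementation may add);
# the callable signature below is an assumption:
import torch

def test_chunkwise_sketch(net, x, chunk_size=200):
    # x: (1, C, H, W); run the network band by band and re-concatenate along height
    _, _, H, _ = x.shape
    outs = []
    for top in range(0, H, chunk_size):
        outs.append(net(x[:, :, top:top + chunk_size, :]))
    return torch.cat(outs, dim=2)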
Example #9
def train_test_model_v1(params, train_input_buffer, train_ref_buffer,
                        test_input_buffer, test_ref_buffer, TEST_MODE,
                        RE_TRAIN):
    """
    입력 구성: params, input_buffer = norm. of [color, g-buffer], ref_buffer = norm. of color
    순서: initial setting -> data transformation function setting -> data load -> train and test
    특징: PR 버전의 ttv를 그대로 활용.
    """
    """INITIAL SETTING"""
    # GPU index setting
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _, H, W, _ = test_input_buffer.shape

    # channel index (note: both branches currently set ch_col_end to 3)
    ch_col_start = 0
    if params["color_merge"]:
        ch_col_end = 3
    else:
        ch_col_end = 3

    if params['fixing_random_seed']:
        np.random.seed(0)

    ch_albedo_start = ch_col_end
    ch_albedo_end = ch_albedo_start + 3
    ch_depth_start = ch_albedo_end
    ch_depth_end = ch_depth_start + 1
    ch_normal_start = ch_depth_end
    ch_normal_end = ch_normal_start + 3

    # for test or retraining
    parameter_pth = params[
        'trained_model_folder_pth'] + "/parameters/" + params[
            'trained_parameter_name']

    if not TEST_MODE:
        """SETTING DATA LOAD AND CORRESPONDING TRANSFORMS FOR TRAINING"""
        # define transform op
        transform = transforms.Compose([
            FT.RandomCrop(params['patch_size']),
            FT.RandomFlip(multi_crop=params['multi_crop']),
            FT.PermuteColor(multi_crop=params['multi_crop']),
            FT.ToTensor(multi_crop=False)
        ])
        # train data loader
        train_data = dataset.Supervised_dataset(train_input_buffer,
                                                train_ref_buffer,
                                                train=True,
                                                transform=transform)
        train_loader = torch.utils.data.DataLoader(
            train_data, batch_size=params['batch_size'], shuffle=True)
        """SAVING THE TENSORBOARD"""
        out_tensorboard_folder_name = params[
            "saving_sub_folder_name"] + "/tensorboards"
        if not os.path.exists(out_tensorboard_folder_name):
            os.mkdir(out_tensorboard_folder_name)
        writer = tensorboard.SummaryWriter(out_tensorboard_folder_name)

        "PARAMETER LOAD FOR RETRAIN"
        if RE_TRAIN:
            saved_torch_para = torch.load(parameter_pth)
            start_epoch = saved_torch_para['epoch']
        else:
            saved_torch_para = None
            start_epoch = 0

        end_epochs = params["epochs"]

    else:
        "PARAMETER LOAD FOR TEST"
        saved_torch_para = torch.load(parameter_pth)
        start_epoch = 0
        end_epochs = 0
    """LOSS SETTING"""
    if params['loss_type'] == 'l2':
        loss_fn = torch.nn.MSELoss()
    elif params['loss_type'] == 'l1':
        loss_fn = torch.nn.L1Loss()
    elif params['loss_type'] == 'custom_loss_v1':
        loss_fn = my_loss.my_custom_loss_v1(params['loss_weights'],
                                            params['SMAP_loss'],
                                            params['reinhard_loss'])
    else:
        print("unknown loss!")
        return
    """NETWORK INITIALIZATION"""
    saving_code_folder_name = params["saving_sub_folder_name"] + "/net_code"
    if not os.path.exists(saving_code_folder_name):
        os.mkdir(saving_code_folder_name)

    if params['network_name'] == "WLS_net_v1":
        mynet = models_v1.WLS_net_v1(params,
                                     loss_fn,
                                     ch_in=10,
                                     kernel_size=3,
                                     n_layers=50,
                                     length_p_kernel=21,
                                     epsilon=0.001,
                                     pad_mode=0,
                                     loss_type=0,
                                     kernel_accum=True,
                                     norm_in_window=False,
                                     is_resnet=True).train().to(device)
        shutil.copy("../Models/models_v1.py",
                    saving_code_folder_name + "/saved_models_v1.py")

    elif params['network_name'] == "KPCN_v1":
        mynet = models_KPCN.KPCN_v1(params,
                                    loss_fn,
                                    ch_in=10,
                                    kernel_size=3,
                                    n_layers=50,
                                    length_p_kernel=21,
                                    no_soft_max=False,
                                    pad_mode=0,
                                    is_resnet=True).train().to(device)
        shutil.copy("../Models/models_KPCN.py",
                    saving_code_folder_name + "/saved_models_KPCN.py")

    elif params['network_name'] == "WLS_net_FG_v1":
        if params['network_index'] == 0:
            print("WLS_net_FG_v1 and FG")
            mynet = models_v2.WLS_net_FG_v1(params,
                                            loss_fn,
                                            ch_in=10,
                                            kernel_size=3,
                                            n_layers=50,
                                            length_p_kernel=21,
                                            epsilon=0.0001,
                                            pad_mode=0,
                                            loss_type=0,
                                            kernel_accum=False,
                                            norm_in_window=True,
                                            is_resnet=True,
                                            FG_mode=1).train().to(device)
        else:
            # g-buffer denoiser
            print("WLS_net_FG_v1 and KPCN for g-buffer denoising")
            mynet = models_v2.WLS_net_FG_v1(params,
                                            loss_fn,
                                            ch_in=10,
                                            kernel_size=3,
                                            n_layers=50,
                                            length_p_kernel=21,
                                            epsilon=0.0001,
                                            pad_mode=0,
                                            loss_type=0,
                                            kernel_accum=False,
                                            norm_in_window=False,
                                            is_resnet=True,
                                            FG_mode=2).train().to(device)
        shutil.copy("../Models/models_v2.py",
                    saving_code_folder_name + "/saved_models_v2.py")

    # re train or test mode
    if RE_TRAIN or TEST_MODE:
        "old and new"
        # mynet.load_state_dict(saved_torch_para['model_state_dict'])
        mynet = saved_torch_para['model']
    """SET LOSS AND OPTIMIZATION"""
    optimizer = optim.Adam(mynet.parameters(), lr=params['lr'])
    if RE_TRAIN or TEST_MODE:
        optimizer.load_state_dict(saved_torch_para['optimizer_state_dict'])
    """TRAIN NETWORK"""
    with tqdm(range(start_epoch, end_epochs), leave=True) as tnr:
        tnr.set_postfix(epoch=0, loss=-1.)

        for epoch in tnr:

            one_epoch_loss = 0.0
            num_iter_for_one_epoch = 0

            for data in train_loader:
                optimizer.zero_grad()

                "주의) 모든 data는 float16임. 그래서 netwrork와 맞추기 위해 float32로 변경"
                x = data['input'].cuda().to(torch.float32)
                y = data['target'].cuda().to(torch.float32)

                if (epoch + 1) % params["val_patches_saving_epoch"] == 0:
                    full_res_out = True
                else:
                    full_res_out = False

                if params['network_name'] == "WLS_net_v1":
                    y_pred, current_loss = mynet(x, y, False, full_res_out)
                    # current_loss = loss_fn(y_pred, y)

                elif params['network_name'] == "KPCN_v1":
                    y_pred, current_loss = mynet(x, y, only_img_out=False)

                elif params['network_name'] == "WLS_net_FG_v1":
                    y_pred, current_loss = mynet(x, y, False, full_res_out)
                    # current_loss = loss_fn(y_pred, y)

                current_loss.backward()
                optimizer.step()

                # show the current loss after each batch
                tnr.set_postfix(epoch=epoch, loss=current_loss.item())

                one_epoch_loss += current_loss.data.item()
                num_iter_for_one_epoch += 1

            one_epoch_loss /= num_iter_for_one_epoch
            writer.add_scalar('training loss', one_epoch_loss, epoch)

            "PARAMETER SAVING"
            if (epoch + 1) % params['para_saving_epoch'] == 0:
                out_para_folder_name = params[
                    "saving_sub_folder_name"] + "/parameters"
                if not os.path.exists(out_para_folder_name):
                    os.mkdir(out_para_folder_name)
                torch.save(
                    {
                        'epoch': epoch,
                        # 'model_state_dict': mynet.state_dict(),
                        'model': mynet,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': current_loss
                    },
                    out_para_folder_name + "/latest_parameter")

                # torch.load(out_para_folder_name + "/latest_parameter")

            "INTERMEDIATE RESULTING PATCH SAVING"
            if (epoch + 1) % params["val_patches_saving_epoch"] == 0:
                inter_patch_folder_name = params[
                    "saving_sub_folder_name"] + "/val_patches"
                if not os.path.exists(inter_patch_folder_name):
                    os.mkdir(inter_patch_folder_name)

                x_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    x)
                y_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    y)
                y_pred_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                    y_pred)

                for l in range(x_np_saving.shape[0]):
                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_in.exr",
                        x_np_saving[l, :, :, ch_col_start:ch_col_end])

                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_g_albedo.exr",
                        x_np_saving[l, :, :, ch_albedo_start:ch_albedo_end])
                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_g_depth.exr",
                        x_np_saving[l, :, :, ch_depth_start:ch_depth_end])
                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_g_normal.exr",
                        x_np_saving[l, :, :, ch_normal_start:ch_normal_end])

                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_out.exr",
                        y_pred_np_saving[l, :, :, 0:3])

                    exr.write(
                        inter_patch_folder_name + "/epoch_" + str(epoch) +
                        "_" + str(l) + "_color_ref.exr", y_np_saving[l, :, :,
                                                                     0:3])
        # writer.close()
    """SETTING DATA LOAD FOR TEST"""
    transform_img = transforms.Compose([FT.ToTensor(multi_crop=False)
                                        ])  # targeting for image

    # test data loader
    test_data = dataset.Supervised_dataset(test_input_buffer,
                                           test_ref_buffer,
                                           train=False,
                                           transform=transform_img)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=1,
                                              shuffle=False)
    """VALIDATE NETWORK"""
    with torch.no_grad():
        mynet.eval()

        out_folder_name = params["saving_sub_folder_name"] + "/test_imgs"
        if not os.path.exists(out_folder_name):
            os.mkdir(out_folder_name)

        rmse_saving_pth = out_folder_name + "/rmse_list.txt"
        f = open(rmse_saving_pth, 'w')

        image_index = 0

        f.write(str(params["saving_folder_name"]))
        f.write("\n")

        for data in test_loader:
            x = data['input'].cuda().to(torch.float32)
            y = data['target'].cuda().to(torch.float32)

            if params['network_name'] == "WLS_net_v1":
                # y_pred = mynet(x, y, True)
                y_pred = mynet.test_chunkwise(x, chunk_size=200)

            elif params['network_name'] == "KPCN_v1":
                y_pred = mynet(x, y, only_img_out=True)
                # y_pred = mynet.test_chunkwise(x, chunk_size=200)

            elif params['network_name'] == "WLS_net_FG_v1":
                # y_pred = mynet(x, y, True)
                y_pred = mynet.test_chunkwise(x, chunk_size=200)

            "FROM TORCH TENSOR TO NUMPY TENSOR"
            x_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                x[:, :3, :, :])
            y_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y)
            y_pred_np_saving = other_tools.from_torch_tensor_img_to_full_res_numpy(
                y_pred)

            x_np_saving = x_np_saving[0]
            y_np_saving = y_np_saving[0]
            y_pred_np_saving = y_pred_np_saving[0]

            x_np_saving = norm.denormalization_signed_log(x_np_saving)
            y_np_saving = norm.denormalization_signed_log(y_np_saving)
            y_pred_np_saving = norm.denormalization_signed_log(
                y_pred_np_saving)

            rmse = other_tools.calcRelMSE(y_pred_np_saving, y_np_saving)
            rmse_str = str(image_index) + " image relMSE : " + str(rmse)
            f.write(rmse_str)
            f.write("\n")
            print(rmse_str)

            "SAVING THE RESULTING IMAGES"
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_input.exr", x_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_gt.exr", y_np_saving)
            exr.write(
                out_folder_name + "/" + params['saving_file_name'] + "_" +
                str(image_index) + "_result.exr", y_pred_np_saving)

            image_index += 1
        f.close()
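
# A minimal sketch of how the checkpoint written above can be resumed,
# mirroring the RE_TRAIN/TEST_MODE paths of this function; the path and
# learning rate are illustrative placeholders:
import torch
from torch import optim

checkpoint = torch.load("path/to/parameters/latest_parameter")
mynet = checkpoint['model']                      # the whole pickled model, as saved above
start_epoch = checkpoint['epoch']
optimizer = optim.Adam(mynet.parameters(), lr=1e-4)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# Saving the full model object (rather than only its state_dict) ties the
# checkpoint to the exact class definition available at load time.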