Example #1
def load_iteration(image_file, image_sqr_file):
    image_exr = pyexr.open(image_file)
    header = image_exr.input_file.header()

    spp = header['spp']
    image_data = pyexr.read(image_file)
    image_sqr_data = pyexr.read(image_sqr_file)
    return image_data, image_sqr_data, spp
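`load_iteration` returns the accumulated image, its accumulated square, and the sample count stored in a custom `spp` header attribute. A minimal sketch of how such a pair is typically turned into a per-pixel variance estimate, mirroring the expression used later in Example #27:

import numpy as np

def iteration_variance(image, image_sqr, spp):
    # Per-pixel sample variance from the image and its squared accumulation,
    # using the same expression as Example #27.
    return spp / (spp - 1) * (image_sqr / spp - (image / spp) ** 2)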
Example #2
def init(scene, gt, includes, max_spp):
    if gt:
        gt = pyexr.read(gt)
    else:
        gt = pyexr.read(f"{scene}-gt.exr")

    data_sources = find_data_sources(Path(".."), includes, scene)
    if not data_sources:
        print("No data sources found!")
        sys.exit(1)

    for data_source in data_sources:
        print(data_source)

    run(gt, data_sources, max_spp)
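`find_data_sources` is not shown here; a minimal sketch of one plausible implementation, assuming it collects run directories under the root whose names contain the scene name and match the optional include filters (the directory layout is an assumption):

from pathlib import Path

def find_data_sources(root, includes, scene):
    # Hypothetical helper: gather candidate data directories for the given scene.
    sources = []
    for path in sorted(Path(root).glob(f"*{scene}*")):
        if not path.is_dir():
            continue
        if includes and not any(include in path.name for include in includes):
            continue
        sources.append(path)
    return sources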
Example #3
	def __init__(self, filename):
		self.filename = filename
		paths = glob.glob(os.path.join(SCRIPT_DIR, "images", self.filename + ".*"))
		if not paths:
			raise ValueError(f"Invalid image name '{filename}'")
		path = paths[0] # Use first path that exists
		self.data = exr.read(path)
		if self.data.shape[-1] > 3:
			self.data = self.data[:,:,0:3]
		self.data_tf = tf.constant(self.data, dtype=tf.float32)
		super().__init__('unit', self.data.shape[-1], 2, {}, 0, 0)
Example #4
def run(input_filename, output_filename):
    exr = pyexr.read(input_filename)

    height, width, channels = exr.shape

    nan_indices = np.concatenate(
        [np.argwhere(np.isinf(exr)),
         np.argwhere(np.isnan(exr))])

    for index in nan_indices:
        exr[tuple(index)] = neighborhood_average(exr, index)

    pyexr.write(output_filename, exr)

    print(f"Removed {len(nan_indices)} nans")
Example #5
def build_dataset(data_source):
    exrs = []

    i = 0
    while True:
        data_path = Path(data_source)
        exr_path = data_path / f"auto-{2**i:05d}spp.exr"
        if not exr_path.exists():
            break

        exrs.append(pyexr.read(str(exr_path)))

        i += 1

    return exrs
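The loop assumes images named `auto-00001spp.exr`, `auto-00002spp.exr`, `auto-00004spp.exr`, and so on, doubling the sample count each step. A small usage sketch (the directory name is illustrative):

exrs = build_dataset("renders/my-scene")
spps = [2 ** i for i in range(len(exrs))]  # 1, 2, 4, 8, ... matching the filenames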
Example #6
def convert_to_density_mesh(input_path, output_path):
    num_points_per_axis = 400
    bounds = np.array([[0, 1], [0, 1]])

    x = np.linspace(bounds[0][0], bounds[0][1], num_points_per_axis)
    y = np.linspace(bounds[1][0], bounds[1][1], num_points_per_axis)

    figure, axes = plt.subplots(1, 1)

    image = pyexr.read(str(input_path))

    density_mesh(axes, x, y, image[:, :, 0], bounds, flip_y=True)

    plt.tight_layout()
    plt.savefig(str(output_path), bbox_inches='tight')
    plt.close()
Example #7
def init(exr_path):
    exr = pyexr.read(exr_path)

    max_coords = (-1, -1)
    max_value = 0

    height, width, _ = exr.shape

    for y in range(height):
        for x in range(width):
            value = np.sum(exr[y, x, :])
            if value > max_value:
                max_value = value
                max_coords = (x, y)

    print(f"max value: {max_value}")
    print(f"max coords: {max_coords}")
Example #8
def build_dataset(error_fn, gt, data_path):
    mses = []

    i = 0
    while True:
        exr_path = data_path / f"auto-{2**i:05d}spp.exr"
        if not exr_path.exists():
            break

        mse = error_fn(
            pyexr.read(str(exr_path)),
            gt
        )
        mses.append(mse)

        i += 1

    return mses
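`error_fn` is supplied by the caller; a sketch of one plausible choice, the relative MSE commonly used for comparing renderings (the epsilon value is an assumption):

import numpy as np

def relative_mse(image, reference, epsilon=1e-2):
    # Squared error relative to the reference, averaged over pixels and channels.
    return float(np.mean((image - reference) ** 2 / (reference ** 2 + epsilon)))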
Example #9
def run(filename1, filename2):
    return compare(
        pyexr.read(str(filename1)),
        pyexr.read(str(filename2))
    )
Example #10
    print("Missing arguments: mix_albedo.py <path> <idx>")
    exit(0)

path = sys.argv[1]
num = sys.argv[2]


def get_abs(path, file, num):
    postfix = "_" + str(num) if int(num) > 0 else ""
    return path + "/" + file + postfix + ".png"


albedo = read_png(get_abs(path, "albedo", num)) / 255
if albedo.shape[2] > 3:
    albedo = albedo[:, :, :3]
shading = np.array(pyexr.read(path + "/shading.exr")) / 255

if albedo.shape[:2] != shading.shape[:2]:
    print("Resizing albedo")
    albedo = cv2.resize(albedo, (shading.shape[1], shading.shape[0]))

color = albedo * shading

cv2.imshow("Shading", shading)
cv2.imshow("Color", color)
cv2.waitKey(0)

color = np.reshape(color, (-1, color.shape[1] * 3))
color = np.array(color * 255, dtype=np.uint8)

with open(get_abs(path, "color", num), "wb") as f:
Example #11
if __name__ == '__main__':

    path = "path/to/directory"

    exr_list = glob.glob(os.path.join(path, "mts2_*.exr"))

    exr_list.sort()

    assert (len(exr_list) % 3 == 0)

    base_color_path = exr_list[0]
    base_color_square_path = exr_list[1]

    # may have zeros because of numerical issues
    base_r = pyexr.read(base_color_path, "R")
    base_g = pyexr.read(base_color_path, "G")
    base_b = pyexr.read(base_color_path, "B")

    base_var_r = pyexr.read(base_color_square_path, "R") - np.square(base_r)
    base_var_g = pyexr.read(base_color_square_path, "G") - np.square(base_g)
    base_var_b = pyexr.read(base_color_square_path, "B") - np.square(base_b)
    a = pyexr.read(base_color_path, "A")

    base_var_avg = (base_var_r + base_var_g + base_var_b) / 3
    var_base = np.concatenate((base_var_avg, base_var_avg, base_var_avg, a),
                              axis=2)
    pyexr.write(os.path.join(path, "variance_0.exr"), var_base)

    for i in range(3, len(exr_list), 3):
Example #12
def generate_zero_map(source_path, inferred_path, out_path):
    generated_map = zero_map(
        pyexr.read(str(source_path)),
        pyexr.read(str(inferred_path)),
    )
    pyexr.write(str(out_path), generated_map)
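`zero_map` is not shown; a purely illustrative sketch assuming it flags pixels that are zero in the source render but non-zero in the inferred image:

import numpy as np

def zero_map(source, inferred):
    # Hypothetical helper: 1 where the source pixel is all-zero but the
    # inferred pixel is not, broadcast back to three channels for writing.
    source_zero = np.all(source == 0.0, axis=2)
    inferred_nonzero = np.any(inferred != 0.0, axis=2)
    mask = (source_zero & inferred_nonzero).astype(np.float32)
    return np.stack([mask, mask, mask], axis=2)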
Example #13
def convert(exr_path, png_path):
    image = pyexr.read(str(exr_path))

    srgb = clip(to_srgb(image))
    save_png(srgb, png_path)
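`to_srgb`, `clip`, and `save_png` are not shown; a minimal sketch under the usual assumptions (standard sRGB transfer function, 8-bit PNG output via imageio):

import numpy as np
import imageio

def to_srgb(linear):
    # Standard sRGB transfer function applied to linear radiance.
    a = 0.055
    linear = np.maximum(linear, 0.0)
    return np.where(linear <= 0.0031308,
                    12.92 * linear,
                    (1 + a) * np.power(linear, 1 / 2.4) - a)

def clip(image):
    return np.clip(image, 0.0, 1.0)

def save_png(image, path):
    imageio.imwrite(str(path), (image * 255).astype(np.uint8))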
Example #14
#!/usr/bin/env python3

import os, sys
import pyexr

if len(sys.argv) != 3:
    print(
        "mergexr: Merge multiple EXRs into a single one with multiple channels."
    )
    print("Usage: mergexr.py input_dir output_file")
    exit()

dir = sys.argv[1]
output = sys.argv[2]

data = {}

for dirpath, dirnames, filenames in os.walk(dir):
    for file in filenames:
        buffer_name, ext = os.path.splitext(file)
        if ext == '.exr':
            fullpath = os.path.join(dirpath, file)
            data[buffer_name] = pyexr.read(fullpath)

pyexr.write(output, data)
Example #15
import os, sys
import glob
import pyexr

if len(sys.argv) < 3:
    print("exrmul: Multiply all EXRs specified or in a dir.")
    print("Usage: exrmul.py output_EXR {input_EXR_dir | input_EXRs...}")
    exit()

output_name = sys.argv[1]
path = sys.argv[2]
exr_files = []
if os.path.isdir(path):
    exr_files = glob.glob(os.path.join(path, "*.exr"))
else:
    for i in range(2, len(sys.argv)):
        exr_files.append(sys.argv[i])

output_exr = None
for file in exr_files:
    if output_exr is None:
        output_exr = pyexr.read(file)
    else:
        output_exr *= pyexr.read(file)

if output_exr.shape[2] == 1:
    pyexr.write(output_name, output_exr, channel_names=['Y'])
else:
    pyexr.write(output_name, output_exr)
Example #16
def run_exr(input_file, output_file, model):
    lr_img = pyexr.read(input_file)[:, :, 0:3] * 255
    result = model.predict(lr_img) / 255.
    pyexr.write(output_file, result)
Example #17
def convert_rgb_to_single_channel(exr_path):
    exr = pyexr.read(str(exr_path))

    pixel_average = np.mean(exr, axis=2)
    return np.stack([pixel_average, pixel_average, pixel_average], axis=2)
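An equivalent one-liner, operating on the already-read array, keeps the channel dimension with `keepdims` and repeats it, avoiding the explicit stack:

import numpy as np

def convert_rgb_to_single_channel_alt(exr):
    # Same averaging as above: replicate the per-pixel channel mean across 3 channels.
    return np.repeat(np.mean(exr, axis=2, keepdims=True), 3, axis=2)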
Example #18
def compute(ref_folder, output, methods, scenes, pad=21):
    """Compute metrics from .exr images and saves to disk.

    Args:
        ref_folder(str): path to the folder containing reference images.
        output(str): path to the .csv output file.
        methods(list of str): paths to the folders containing the .exr results,
            one for each method to evaluate. Accepts a list of methods or a
            .txt file listing the methods.
        scenes(str, list of str): name of the .exr scenes to include in the
            comparison. Accepts a list of filenames or a .txt file listing the
            filenames.
        pad(int): number of pixels to ignore on each side when computing
            the metrics.
    """
    scores = pd.DataFrame(columns=["method", "scene", "spp", "valid"] +
                          list(METRIC_LABELS.keys()))

    scenes = _parse_list_or_txt(scenes)
    methods = _parse_list_or_txt(methods)

    n_scenes = len(scenes)
    n_methods = len(methods)

    LOG.info("Evaluating %d scenes and %d methods", n_scenes, n_methods)

    filepaths = []

    if not os.path.splitext(output)[-1] == ".csv":
        mess = "Metric computation expects a .csv output path."
        LOG.error(mess)
        raise RuntimeError(mess)

    dirname = os.path.dirname(output)
    os.makedirs(dirname, exist_ok=True)

    for s_idx, scene in enumerate(scenes):
        sname = os.path.splitext(scene)[0]
        filepaths.append([])

        # Load data for the reference
        ref = pyexr.read(os.path.join(ref_folder, scene))[..., :3]
        if ref.sum() == 0:
            raise ValueError("got an all zero image {}/{}".format(
                ref_folder, scene))
        if pad > 0:  # remove a side portion
            ref = ref[pad:-pad, pad:-pad, :]

        LOG.info("%s", sname)
        for m_idx, m in enumerate(methods):
            mname = os.path.split(m)[-1]
            mname, spp = _get_spp(mname)
            row = {"method": mname, "scene": sname, "spp": spp}

            LOG.info("  %s %d spp", mname, spp)

            # Load data for the current method
            path = os.path.abspath(os.path.join(m, scene))
            filepaths[s_idx].append(path)
            try:
                im = pyexr.read(path)[..., :3]
            except Exception as e:
                LOG.error(e)
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                scores = scores.append(row, ignore_index=True)
                continue

            if pad > 0:  # remove a side portion
                im = im[pad:-pad, pad:-pad, :]
            if im.sum() == 0:
                LOG.warning("got an all zero image {}/{}, "
                            " invalidating the scene".format(m, scene))
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                scores = scores.append(row, ignore_index=True)
                continue

            row["valid"] = True
            for k in METRIC_OPS:
                row[k] = METRIC_OPS[k](im, ref)
            scores = scores.append(row, ignore_index=True)
    scores.to_csv(output)
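`METRIC_OPS` and `METRIC_LABELS` are defined elsewhere in the module; a hypothetical sketch of their shape, with two illustrative metrics (the real dictionaries may list different ones):

import numpy as np

def _mse(im, ref):
    return float(np.mean((im - ref) ** 2))

def _relative_mse(im, ref, eps=1e-2):
    return float(np.mean((im - ref) ** 2 / (ref ** 2 + eps)))

METRIC_OPS = {"mse": _mse, "relmse": _relative_mse}
METRIC_LABELS = {"mse": "MSE", "relmse": "relative MSE"}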
Example #19
#!/usr/bin/env python3

import pyexr
import sys

# channel: 'R','G','B','A'
if len(sys.argv) != 4:
    print("extract_channel_pyexr: Extract a channel as 'Y' into a new EXR.")
    print("Usage: extract_channel_pyexr.py input_file channel output_file")
    exit()

r = pyexr.read(sys.argv[1], sys.argv[2])
pyexr.write(sys.argv[3], r, channel_names=['Y'])
Example #20
#!/usr/bin/env python3

import pyexr
import sys
import numpy as np

if len(sys.argv) != 3:
    print("average_channels: Average all channels into a single one.")
    print("Usage: average_channels.py input_file output_file")
    exit()

rgb = pyexr.read(sys.argv[1])
y = np.average(rgb, 2)

pyexr.write(sys.argv[2], y, channel_names=['Y'])
Example #21
def get_errors(run_dir, combination_type='var', error_type=ErrorType.CUMULATIVE, integrator='sdmm'):
    run_dir = os.path.join(run_dir, "")  # append a trailing '/' if it is missing
    out_dir = os.path.dirname(run_dir)

    print(f"Combining run with weighing heuristic: {combination_type}.")

    rexp = r'^iteration_[0-9]*\.exr$' if integrator == 'ppg' else r'^iteration[0-9]*\.exr$'
    image_files = sorted([
        filename
        for filename in os.listdir(run_dir)
        if re.search(rexp, filename)
    ])

    json_dir = os.path.join(run_dir, "stats.json")

    scene_name = Path(out_dir).parents[2].name
    gt_path = get_gt_path(scene_name)
    gt_image = pyexr.read(gt_path)

    final_image = None
    total_reciprocal_weight = 0.0

    var_data = []
    MrSE_data = []
    MAPE_data = []
    SMAPE_data = []
    spp_data = []
    total_spp = 0

    stats_json = None
    if not os.path.exists(json_dir):
        print('Cannot find stats.json.')
        return {}

    with open(json_dir) as json_file:
        stats_json = json.load(json_file)

    elapsed_seconds = []
    var_data = []
    spp_data = []
    for i in range(len(stats_json)):
        elapsed_seconds.append(stats_json[i]['total_elapsed_seconds'])
        var_data.append(stats_json[i]['mean_pixel_variance'])
        spp_data.append(stats_json[i]['spp'])

    for iteration, image_file in tqdm(enumerate(image_files), total=len(image_files)):
        iteration_image = pyexr.read(os.path.join(run_dir, image_file))

        var = var_data[iteration]
        spp = spp_data[iteration]
        total_spp += spp
        if combination_type == 'var':
            weight = var_data[iteration]
        elif combination_type == 'uniform':
            weight = 1 / spp

        if (
            (integrator == 'ppg' and weight is not None)
            or (integrator == 'sdmm' and total_spp >= 40)
        ):
            total_reciprocal_weight += 1.0 / weight
            if final_image is None:
                final_image = iteration_image / weight
            else:
                final_image += iteration_image / weight

        if final_image is not None and error_type == ErrorType.CUMULATIVE:
            combined_estimate = final_image / total_reciprocal_weight
            MrSE_data.append(aggregate(MrSE(np.clip(combined_estimate, 0, 1), np.clip(gt_image, 0, 1))))
            MAPE_data.append(aggregate(MAPE(combined_estimate, gt_image)))
            SMAPE_data.append(aggregate(SMAPE(combined_estimate, gt_image)))
        elif final_image is None or error_type == ErrorType.INDIVIDUAL:
            MrSE_data.append(aggregate(MrSE(np.clip(iteration_image, 0, 1), np.clip(gt_image, 0, 1))))
            MAPE_data.append(aggregate(MAPE(iteration_image, gt_image)))
            SMAPE_data.append(aggregate(SMAPE(iteration_image, gt_image)))

    final_image /= total_reciprocal_weight

    out_path = os.path.join(out_dir, scene_name + '_test' + '.exr')
    if os.path.exists(out_path):
        os.remove(out_path)
    pyexr.write(out_path, final_image.astype(np.float32))

    SMAPE_final = aggregate(SMAPE(final_image, gt_image))
    MAPE_final = aggregate(MAPE(final_image, gt_image))
    MrSE_final = aggregate(MrSE(np.clip(final_image, 0, 1), np.clip(gt_image, 0, 1)))

    if len(elapsed_seconds) > 0:
        print(
            f"{Fore.GREEN}"
            f"Total time: {elapsed_seconds[-1]:.3f}s, "
            f"MAPE: {MAPE_final:.3f}, "
            f"{Style.RESET_ALL}"
        )

    return {
        'MAPE': (elapsed_seconds, MAPE_data, MAPE_final),
        'SMAPE': (elapsed_seconds, SMAPE_data, SMAPE_final),
        'MrSE': (elapsed_seconds, MrSE_data, MrSE_final),
    }
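`MrSE`, `MAPE`, `SMAPE`, and `aggregate` are used but not defined in these examples; a sketch of common definitions for these per-pixel error metrics (the epsilon stabilizer is an assumption):

import numpy as np

EPSILON = 1e-2  # assumed stabilizer

def MrSE(image, reference):
    # Per-pixel relative squared error.
    return (image - reference) ** 2 / (reference ** 2 + EPSILON)

def MAPE(image, reference):
    # Per-pixel absolute error relative to the reference.
    return np.abs(image - reference) / (reference + EPSILON)

def SMAPE(image, reference):
    # Symmetric variant of MAPE.
    return 2 * np.abs(image - reference) / (np.abs(image) + np.abs(reference) + EPSILON)

def aggregate(error_image):
    # Reduce a per-pixel error image to a single scalar.
    return float(np.mean(error_image))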
Example #22
start_dir = os.getcwd()
pattern   = "*.exr"
for dir,_,_ in os.walk(start_dir):
    filenames.extend(glob(os.path.join(dir,pattern)))

# remove the stratification factor files from the list
factorImages = []
for name in filenames:
    if 'stratfactor-d' in name or 'factor-d' in name or 'variance-d' in name:
        factorImages.append(name)
for name in factorImages:
    filenames.remove(name)

# convert the factor images to false-color pngs
for f in factorImages:
    img = pyexr.read(f)
    fnamePng = f.replace('.exr', '.png')
    plt.imsave(fnamePng, img[:,:,0], vmin=1, vmax=10)

# separate the reference images for error computation
refGlobal = {}
refDirect = {}
for name in filenames:
    if 'ref-bdpt' in name:
        buffer = refGlobal
    elif 'ref-di' in name:
        buffer = refDirect
    else:
        continue

    if 'bathroom' in name:
Example #23
def compute(args):
    # assert len(args.labels) == len(args.methods)
    pad = args.pad

    scores = pd.DataFrame(columns=["method", "scene", "spp", "valid"] +
                          list(METRIC_LABELS.keys()))

    scenes = _parse_list_or_txt(args.scenes)
    methods = _parse_list_or_txt(args.methods)

    n_scenes = len(scenes)
    n_methods = len(methods)

    print("evaluating %d scenes and %d methods\n" % (n_scenes, n_methods))

    filepaths = []

    assert os.path.splitext(
        args.output)[-1] == ".csv", "expects .csv output path"
    dirname = os.path.dirname(args.output)
    os.makedirs(dirname, exist_ok=True)

    for s_idx, s in enumerate(scenes):
        sname = os.path.splitext(s)[0]
        filepaths.append([])
        # Load data for the reference
        ref = pyexr.read(os.path.join(args.ref, s))[..., :3]
        if ref.sum() == 0:
            raise ValueError("got an all zero image {}/{}".format(args.ref, s))
        if pad > 0:  # remove a side portion
            ref = ref[pad:-pad, pad:-pad, :]

        print(". %s" % sname)
        for m_idx, m in enumerate(methods):
            mname = os.path.split(m)[-1]
            mname, spp = _get_spp(mname)
            row = {"method": mname, "scene": sname, "spp": spp}

            print("  - %s %d spp" % (mname, spp))

            # Load data for the current method
            path = os.path.abspath(os.path.join(m, s))
            filepaths[s_idx].append(path)
            try:
                im = pyexr.read(path)[..., :3]
            except Exception as e:
                print(e)
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                scores = scores.append(row, ignore_index=True)
                continue

            if pad > 0:  # remove a side portion
                im = im[pad:-pad, pad:-pad, :]
            if im.sum() == 0:
                print("got an all zero image {}/{}, invalidating the scene".
                      format(m, s))
                row["valid"] = False
                for k in METRIC_OPS:
                    row[k] = -1
                scores = scores.append(row, ignore_index=True)
                continue

            row["valid"] = True
            for k in METRIC_OPS:
                row[k] = METRIC_OPS[k](im, ref)
            scores = scores.append(row, ignore_index=True)

            # print("\t", m, "\trmse = {:.5f}".format(scores["rmse"][m_idx, s_idx],))

    print(scores)
    scores.to_csv(args.output)
Example #24
def Tonemap(hdr, gamma=1.0 / 2.2):  # signature reconstructed from the calls below; the default gamma is an assumption
	ldr = np.power(hdr, gamma)
	ldr = np.round(ldr * 255)  # assumed 0-255 scaling to match data_range=255 in PSNR
	return ldr

def PSNR(img_refer, img_noisy):
	img_refer_ldr = Tonemap(img_refer)
	img_noisy_ldr = Tonemap(img_noisy)
	return peak_signal_noise_ratio(img_refer_ldr, img_noisy_ldr, data_range=255)

if len(sys.argv) < 3:
	print("exror: Evaluate image in MSE, rMSE and SSIM.")
	print("Usage: exror.py reference_image noisy_image [other_noisy_image]")
	exit(0)

img_refer_name = sys.argv[1]
img_refer = pyexr.read(img_refer_name).astype(np.float64)

def GetSPP(file_name):
	spp = re.findall(r'\d*(?=\.exr)', file_name)
	if spp[0]:
		return int(spp[0])
	spp = re.findall(r'\d*(?=K\.exr)', file_name)
	if spp[0]:
		return int(spp[0]) * 1024
	return sys.maxsize

img_noisy_names = {}
for i in range(2, len(sys.argv)):
Example #25
#!/usr/bin/env python3

import pyexr
import sys
import numpy as np

if len(sys.argv) != 3:
    print(
        "clamp: Turn zero-value pixels into red, nonzero-value pixels into green."
    )
    print("Usage: clamp.py input_file output_file")
    exit()

img = pyexr.read(sys.argv[1])
size = img.shape[0:2]

zero = np.zeros(size)

is_zero = np.all(img == 0, 2)
img_out = np.stack((is_zero, 1 - is_zero, zero), 2)

n_zero = is_zero.sum()
print(n_zero / (size[0] * size[1]))

pyexr.write(sys.argv[2], img_out)
Example #26
import pyexr
import numpy as np

if __name__ == '__main__':

    shift_coords = [[1, 0], [0, 1]]

    path = "path/to/some/image.exr"
    img = pyexr.read(path)

    height = img.shape[0]
    width = img.shape[1]

    r = pyexr.read(path, "R")
    g = pyexr.read(path, "G")
    b = pyexr.read(path, "B")
    a = pyexr.read(path, "A")

    for coords in shift_coords:

        new_r = np.zeros((height, width, 1))
        new_g = np.zeros((height, width, 1))
        new_b = np.zeros((height, width, 1))
        # new_a = np.zeros((height, width, 1))

        print(width, height)

        for h in range(height):
            for w in range(width):
                target_w = w + coords[0]
                target_h = h + coords[1]
Example #27
def combine_renders(run_dir, combination_type='var', error_type=ErrorType.CUMULATIVE):
    run_dir = os.path.join(run_dir, "")  # append a trailing '/' if it is missing
    out_dir = os.path.dirname(run_dir)

    print(f"Combining run with weighing heuristic: {combination_type}.")

    image_files = sorted([
        filename
        for filename in os.listdir(run_dir)
        if re.search(r'^iteration[0-9]*\.exr$', filename)
    ])

    image_sqr_files = sorted([
        filename
        for filename in os.listdir(run_dir)
        if re.search(r'^iteration_sqr[0-9]*\.exr$', filename)
    ])

    scene_name = Path(out_dir).parents[2].name
    gt_path = get_gt_path(scene_name)
    gt_image = pyexr.read(gt_path)

    final_image = None
    total_reciprocal_weight = 0.0

    var_data = []
    MrSE_data = []
    MAPE_data = []
    SMAPE_data = []
    spp_data = []
    total_spp = 0

    for image_file, image_sqr_file in tqdm(
        zip(image_files, image_sqr_files), total=len(image_files)
    ):
        iteration = int(
            re.search(r'^iteration([0-9]*)\.exr$', image_file).group(1)
        )
        # print('Processing iteration {}.'.format(iteration))
        iteration_image, iteration_square_image, spp = load_iteration(
            os.path.join(run_dir, image_file),
            os.path.join(run_dir, image_sqr_file)
        )

        image_shape = iteration_image.shape[:2]

        image_variance = spp / (spp - 1) * (
            iteration_square_image / spp - (iteration_image / spp) ** 2
        )
        # image_variance = ski.filters.gaussian(
        #     image_variance, sigma=3, multichannel=True
        # )
        image_variance = np.clip(image_variance, 0, 2000)
        per_channel_variance = np.mean(image_variance, axis=(0,1))
        max_variance = np.maximum(image_variance, per_channel_variance)

        # pyexr.write(os.path.join(out_dir, f'iteration_var{iteration}.exr'), image_variance)

        total_spp += spp
        spp_data.append(total_spp)
        # print('Iteration {} spp={}.'.format(iteration, spp))

        var_data.append(aggregate(image_variance))
        if iteration >= 4: # or iteration > 10:
            # print('Including iteration {}.'.format(iteration))
            if combination_type == 'var':
                weight = per_channel_variance
            elif combination_type == 'max_var':
                weight = max_variance
            elif combination_type == 'uniform':
                weight = 1 / spp
            total_reciprocal_weight += 1.0 / weight
            if final_image is None:
                final_image = iteration_image / weight
            else:
                final_image += iteration_image / weight


        if final_image is not None and error_type == ErrorType.CUMULATIVE:
            combined_estimate = final_image / total_reciprocal_weight
            # out_path = os.path.join(out_dir, f'combined_{iteration:02d}' + '.exr')
            # if os.path.exists(out_path):
            #     os.remove(out_path)
            # pyexr.write(out_path, combined_estimate.astype(np.float32))
            MrSE_data.append(aggregate(MrSE(np.clip(combined_estimate, 0, 1), np.clip(gt_image, 0, 1))))
            MAPE_data.append(aggregate(MAPE(combined_estimate, gt_image)))
            SMAPE_data.append(aggregate(SMAPE(combined_estimate, gt_image)))
        elif final_image is None or error_type == ErrorType.INDIVIDUAL:
            MrSE_data.append(aggregate(MrSE(np.clip(iteration_image, 0, 1), np.clip(gt_image, 0, 1))))
            MAPE_data.append(aggregate(MAPE(iteration_image, gt_image)))
            SMAPE_data.append(aggregate(SMAPE(iteration_image, gt_image)))

    if len(var_data) <= 1:
        return {}

    json_dir = os.path.join(run_dir, "stats.json")
    total_elapsed_seconds = None
    mean_path_length = None
    elapsed_seconds = []
    if os.path.exists(json_dir):
        with open(json_dir) as json_file:
            stats_json = json.load(json_file)
            for i in range(len(stats_json)):
                stats_json[i]['mean_pixel_variance'] = float(var_data[i])
                stats_json[i]['ttuv'] = float(var_data[i]) * stats_json[i]['elapsed_seconds']
                elapsed_seconds.append(stats_json[i]['total_elapsed_seconds'])
            last_stats = stats_json[-1]
            total_elapsed_seconds = last_stats['total_elapsed_seconds']
            mean_path_length = last_stats['mean_path_length']
        with open(json_dir, 'w') as json_file:
            json_file.write(json.dumps(stats_json, sort_keys=True, indent=4))

    plot_filename = 'error.pdf'
    plot_file = os.path.join(out_dir, plot_filename)
    fig, ax = plt.subplots(figsize=(12, 9))
    ax.set_axisbelow(True)
    ax.minorticks_on()
    ax.grid(which='major', axis='both', linestyle='-', linewidth='0.5')
    ax.grid(which='minor', axis='both', linestyle=':', linewidth='0.5')
    ax.semilogy(elapsed_seconds, var_data, base=10, label='Estimated VAR')
    ax.semilogy(elapsed_seconds, SMAPE_data, base=10, label='SMAPE')
    ax.semilogy(elapsed_seconds, MrSE_data, base=10, label='MRSE')
    ax.semilogy(elapsed_seconds, MAPE_data, base=10, label='MAPE')
    # ax.plot(var_data, label='Estimated VAR')
    # ax.plot(SMAPE_data, label='SMAPE')
    # ax.plot(MrSE_data, label='MRSE')
    # ax.set_yticks(var_data)
    ax.legend()
    fig.tight_layout()
    plt.savefig(plot_file, format=os.path.splitext(plot_filename)[-1][1:], dpi=fig.dpi)

    if final_image is None:
        print('No final image produced!')
        return

    final_image /= total_reciprocal_weight

    SMAPE_final = aggregate(SMAPE(final_image, gt_image))
    MAPE_final = aggregate(MAPE(final_image, gt_image))
    MrSE_final = aggregate(MrSE(np.clip(final_image, 0, 1), np.clip(gt_image, 0, 1)))

    print(f"{Fore.GREEN}MAPE={MAPE_final:.3f}, SMAPE={SMAPE_final:.3f}, MrSE={MrSE_final:.3f}{Style.RESET_ALL}")
    if total_elapsed_seconds and mean_path_length:
        print(
            f"{Fore.GREEN}"
            f"Total time: {total_elapsed_seconds:.3f}s, "
            f"Mean path length: {mean_path_length:.3f}."
            f"{Style.RESET_ALL}"
        )

    out_path = os.path.join(out_dir, scene_name + '.exr')
    if os.path.exists(out_path):
        os.remove(out_path)
    pyexr.write(out_path, final_image.astype(np.float32))
    return {
        'MAPE': (elapsed_seconds, MAPE_data, MAPE_final),
        'SMAPE': (elapsed_seconds, SMAPE_data, SMAPE_final),
        'MrSE': (elapsed_seconds, MrSE_data, MrSE_final),
    }