def run_densedepth_variable_bin(dataset_type, spad_file, densedepth_depth_file,
                                hyper_string, sid_bins, alpha, beta, offset,
                                intensity_ablation, vectorized, entry,
                                save_outputs, small_run, output_dir):
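    """Evaluate variable-bin histogram matching on DenseDepth predictions.

    Loads simulated SPAD histograms and DenseDepth initial depth estimates,
    removes the ambient (DC) component and undoes radiative falloff where the
    SPAD config calls for it, rescales each histogram onto a per-image SID
    discretization, and histogram-matches the depth map against it. Writes
    per-image metrics, weighted averages, and (optionally) the predicted
    depth maps to output_dir.
    """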
    print("output dir: {}".format(output_dir))
    safe_makedir(output_dir)

    # Load all the data:
    print("Loading SPAD data from {}".format(spad_file))
    spad_dict = np.load(spad_file, allow_pickle=True).item()
    spad_data = spad_dict["spad"]
    intensity_data = spad_dict["intensity"]
    spad_config = spad_dict["config"]
    print("Loading depth data from {}".format(densedepth_depth_file))
    depth_data = np.load(densedepth_depth_file)
    dataset = load_data(channels_first=True, dataset_type=dataset_type)

    # Read SPAD config and determine proper course of action
    dc_count = spad_config["dc_count"]
    ambient = dc_count / spad_config["spad_bins"]  # expected DC counts per bin
    use_intensity = spad_config["use_intensity"]
    use_squared_falloff = spad_config["use_squared_falloff"]
    lambertian = spad_config["lambertian"]
    use_poisson = spad_config["use_poisson"]
    min_depth = spad_config["min_depth"]
    max_depth = spad_config["max_depth"]

    print("ambient: ", ambient)
    print("dc_count: ", dc_count)
    print("use_intensity: ", use_intensity)
    print("use_squared_falloff:", use_squared_falloff)
    print("lambertian:", lambertian)

    print("spad_data.shape", spad_data.shape)
    print("depth_data.shape", depth_data.shape)
    print("intensity_data.shape", intensity_data.shape)

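    # Spacing-increasing discretization (SID): depth bins spaced uniformly in
    # log space over [alpha, beta].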
    sid_obj_init = SID(sid_bins, alpha, beta, offset)

    if entry is None:
        metric_list = [
            "delta1", "delta2", "delta3", "rel_abs_diff", "rmse", "mse",
            "log10", "weight"
        ]
        metrics = np.zeros(
            (len(dataset) if not small_run else small_run, len(metric_list)))
        entry_list = []
        outputs = []
        times = []
        for i in range(depth_data.shape[0]):
            if small_run and i == small_run:
                break
            entry_list.append(i)
            print("Evaluating {}[{}]".format(dataset_type, i))
            spad = spad_data[i, ...]
            bin_edges = np.linspace(min_depth, max_depth, len(spad) + 1)
            bin_values = (bin_edges[1:] + bin_edges[:-1]) / 2
            weights = np.ones_like(depth_data[i, 0, ...])
            # Ablation study: Turn off intensity, even if spad has been simulated with it.
            if use_intensity and not intensity_ablation:
                weights = intensity_data[i, 0, ...]

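            # Remove the ambient (DC) floor. For Poisson counts with per-bin
            # mean `ambient`, adjacent-bin differences have standard deviation
            # sqrt(2 * ambient), so the gradient threshold below is ~5 sigma.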
            if dc_count > 0.:
                spad = remove_dc_from_spad_edge(
                    spad,
                    ambient=ambient,
                    grad_th=5 * np.sqrt(2 * ambient))

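            # Undo radiative falloff: the simulation attenuates returns by
            # 1/d^2 (1/d^4 under the Lambertian model), so multiply counts
            # back by the corresponding power of the bin-center depth.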
            if use_squared_falloff:
                if lambertian:
                    spad = spad * bin_values**4
                else:
                    spad = spad * bin_values**2
            # Scale SID object to maximize bin utilization
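            # Restrict to the populated extent of the histogram and build a
            # fresh SID discretization over just that depth range, so every
            # SID bin covers depths that actually carry signal.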
            nonzeros = np.nonzero(spad)[0]
            if nonzeros.size > 0:
                min_depth_bin = np.min(nonzeros)
                max_depth_bin = np.max(nonzeros) + 1
                if max_depth_bin > len(bin_edges) - 2:
                    max_depth_bin = len(bin_edges) - 2
            else:
                min_depth_bin = 0
                max_depth_bin = len(bin_edges) - 2
            min_depth_pred = np.clip(bin_edges[min_depth_bin],
                                     a_min=1e-2,
                                     a_max=None)
            max_depth_pred = np.clip(bin_edges[max_depth_bin + 1],
                                     a_min=1e-2,
                                     a_max=None)
            sid_obj_pred = SID(sid_bins=sid_obj_init.sid_bins,
                               alpha=min_depth_pred,
                               beta=max_depth_pred,
                               offset=0.)
            spad_rescaled = rescale_bins(spad[min_depth_bin:max_depth_bin + 1],
                                         min_depth_pred, max_depth_pred,
                                         sid_obj_pred)
            start = process_time()
            pred, _ = image_histogram_match_variable_bin(
                depth_data[i, 0, ...], spad_rescaled, weights, sid_obj_init,
                sid_obj_pred, vectorized)
            times.append(process_time() - start)
            # Calculate metrics
            gt = dataset[i]["depth_cropped"].unsqueeze(0)

            pred_metrics = get_depth_metrics(
                torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
                torch.ones_like(gt))

            for j, metric_name in enumerate(metric_list[:-1]):
                metrics[i, j] = pred_metrics[metric_name]

            metrics[i, -1] = np.size(pred)
            # Option to save outputs:
            if save_outputs:
                outputs.append(pred)
            print("\tAvg RMSE = {}".format(
                np.mean(metrics[:i + 1, metric_list.index("rmse")])))

        if save_outputs:
            np.save(
                os.path.join(output_dir,
                             "densedepth_{}_outputs.npy".format(hyper_string)),
                np.array(outputs))
        print("Avg Time: {}".format(np.mean(times)))
        # Save metrics using pandas
        metrics_df = pd.DataFrame(data=metrics,
                                  index=entry_list,
                                  columns=metric_list)
        metrics_df.to_pickle(path=os.path.join(
            output_dir, "densedepth_{}_metrics.pkl".format(hyper_string)))
        # Compute weighted averages:
        average_metrics = np.average(metrics_df.iloc[:, :-1],
                                     weights=metrics_df.weight,
                                     axis=0)
        average_df = pd.Series(data=average_metrics, index=metric_list[:-1])
        average_df.to_csv(os.path.join(
            output_dir, "densedepth_{}_avg_metrics.csv".format(hyper_string)),
                          header=True)
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
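        # average_metrics[5] is "mse"; it is omitted from the summary line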
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(average_metrics[0], average_metrics[1],
                     average_metrics[2], average_metrics[3],
                     average_metrics[4], average_metrics[6]))

        print("wrote results to {} ({})".format(output_dir, hyper_string))

    else:
        input_unbatched = dataset.get_item_by_id(entry)
        from torch.utils.data._utils.collate import default_collate

        data = default_collate([input_unbatched])

        # Recover the dataset index and entry id from the collated batch
        entry = data["entry"][0]
        i = int(entry)
        entry = entry if isinstance(entry, str) else entry.item()
        print("Evaluating {}[{}]".format(dataset_type, i))
        # Rescale SPAD
        spad = spad_data[i, ...]
        spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj_init)
        print("spad_rescaled", spad_rescaled)
        weights = np.ones_like(depth_data[i, 0, ...])
        if use_intensity:
            weights = intensity_data[i, 0, ...]

        if dc_count > 0.:
            spad_rescaled = remove_dc_from_spad(
                spad_rescaled,
                sid_obj_init.sid_bin_edges,
                sid_obj_init.sid_bin_values[:-2]**2,
                lam=1e1 if use_poisson else 1e-1,
                eps_rel=1e-5)
        if use_squared_falloff:
            spad_rescaled = spad_rescaled * sid_obj_init.sid_bin_values[:-2]**2
        pred, _ = image_histogram_match(depth_data[i, 0, ...], spad_rescaled,
                                        weights, sid_obj_init)
        # Calculate metrics
        gt = data["depth_cropped"]
        print(gt.shape)
        print(pred.shape)
        print(gt[:, :, 40, 60])
        print(depth_data[i, 0, 40, 60])
        print("before rmse: ",
              np.sqrt(np.mean((gt.numpy() - depth_data[i, 0, ...])**2)))

        before_metrics = get_depth_metrics(
            torch.from_numpy(
                depth_data[i, 0, ...]).unsqueeze(0).unsqueeze(0).float(), gt,
            torch.ones_like(gt))
        pred_metrics = get_depth_metrics(
            torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
            torch.ones_like(gt))
        if save_outputs:
            np.save(
                os.path.join(
                    output_dir, "densedepth_{}[{}]_{}_out.npy".format(
                        dataset_type, entry, hyper_string)), pred)

        print("before:")
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(before_metrics["delta1"], before_metrics["delta2"],
                     before_metrics["delta3"], before_metrics["rel_abs_diff"],
                     before_metrics["rmse"], before_metrics["log10"]))
        print("after:")

        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(pred_metrics["delta1"], pred_metrics["delta2"],
                     pred_metrics["delta3"], pred_metrics["rel_abs_diff"],
                     pred_metrics["rmse"], pred_metrics["log10"]))


def run_dorn(dataset_type, spad_file, dorn_depth_file, hyper_string, sid_bins,
             alpha, beta, offset, lam, eps_rel, entry, save_outputs, small_run,
             output_dir):
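    """Evaluate fixed-bin SID histogram matching on DORN predictions.

    Same pipeline as the DenseDepth variant above, but the SPAD histogram is
    rescaled onto a single global SID object and matched with
    image_histogram_match.
    """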
    # Load all the data:
    print("Loading SPAD data from {}".format(spad_file))
    spad_dict = np.load(spad_file, allow_pickle=True).item()
    spad_data = spad_dict["spad"]
    intensity_data = spad_dict["intensity"]
    spad_config = spad_dict["config"]
    print("Loading depth data from {}".format(dorn_depth_file))
    depth_data = np.load(dorn_depth_file)
    dataset = load_data(channels_first=True, dataset_type=dataset_type)

    # Read SPAD config and determine proper course of action
    dc_count = spad_config["dc_count"]
    ambient = dc_count / spad_config["spad_bins"]  # expected DC counts per bin
    use_intensity = spad_config["use_intensity"]
    use_squared_falloff = spad_config["use_squared_falloff"]
    min_depth = spad_config["min_depth"]
    max_depth = spad_config["max_depth"]

    print("dc_count: ", dc_count)
    print("use_intensity: ", use_intensity)
    print("use_squared_falloff:", use_squared_falloff)

    print("spad_data.shape", spad_data.shape)
    print("depth_data.shape", depth_data.shape)
    print("intensity_data.shape", intensity_data.shape)

    sid_obj = SID(sid_bins, alpha, beta, offset)

    if entry is None:
        metric_list = [
            "delta1", "delta2", "delta3", "rel_abs_diff", "rmse", "mse",
            "log10", "weight"
        ]
        metrics = np.zeros(
            (len(dataset) if not small_run else small_run, len(metric_list)))
        entry_list = []
        outputs = []
        for i in range(depth_data.shape[0]):
            if small_run and i == small_run:
                break
            entry_list.append(i)

            print("Evaluating {}[{}]".format(dataset_type, i))
            spad = spad_data[i, ...]
            weights = np.ones_like(depth_data[i, 0, ...])
            if use_intensity:
                weights = intensity_data[i, 0, ...]
            if dc_count > 0.:
                spad = remove_dc_from_spad_edge(spad,
                                                ambient=ambient,
                                                grad_th=3 * ambient)
            if use_squared_falloff:
                bin_edges = np.linspace(min_depth, max_depth, len(spad) + 1)
                bin_values = (bin_edges[1:] + bin_edges[:-1]) / 2
                spad = spad * bin_values**2
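            # Map the uniformly-binned SPAD histogram onto the SID bins.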
            spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj)
            pred, _ = image_histogram_match(depth_data[i, 0, ...],
                                            spad_rescaled, weights, sid_obj)
            # Calculate metrics
            gt = dataset[i]["depth_cropped"].unsqueeze(0)

            pred_metrics = get_depth_metrics(
                torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
                torch.ones_like(gt))

            for j, metric_name in enumerate(metric_list[:-1]):
                metrics[i, j] = pred_metrics[metric_name]

            metrics[i, -1] = np.size(pred)
            # Option to save outputs:
            if save_outputs:
                outputs.append(pred)

        if save_outputs:
            np.save(
                os.path.join(output_dir,
                             "dorn_{}_outputs.npy".format(hyper_string)),
                np.array(outputs))

        # Save metrics using pandas
        metrics_df = pd.DataFrame(data=metrics,
                                  index=entry_list,
                                  columns=metric_list)
        metrics_df.to_pickle(path=os.path.join(
            output_dir, "dorn_{}_metrics.pkl".format(hyper_string)))
        # Compute weighted averages:
        average_metrics = np.average(metrics_df.iloc[:, :-1],
                                     weights=metrics_df.weight,
                                     axis=0)
        average_df = pd.Series(data=average_metrics, index=metric_list[:-1])
        average_df.to_csv(os.path.join(
            output_dir, "dorn_{}_avg_metrics.csv".format(hyper_string)),
                          header=True)
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rms', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(average_metrics[0], average_metrics[1],
                     average_metrics[2], average_metrics[3],
                     average_metrics[4], average_metrics[6]))

        print("wrote results to {} ({})".format(output_dir, hyper_string))

    else:
        input_unbatched = dataset.get_item_by_id(entry)
        from torch.utils.data._utils.collate import default_collate

        data = default_collate([input_unbatched])

        # Recover the dataset index and entry id from the collated batch
        entry = data["entry"][0]
        i = int(entry)
        entry = entry if isinstance(entry, str) else entry.item()
        print("Evaluating {}[{}]".format(dataset_type, i))
        # Rescale SPAD
        spad_rescaled = rescale_bins(spad_data[i, ...], min_depth, max_depth,
                                     sid_obj)
        weights = np.ones_like(depth_data[i, 0, ...])
        if use_intensity:
            weights = intensity_data[i, 0, ...]
        spad_rescaled = preprocess_spad(spad_rescaled,
                                        sid_obj,
                                        use_squared_falloff,
                                        dc_count > 0.,
                                        lam=lam,
                                        eps_rel=eps_rel)

        pred, _ = image_histogram_match(depth_data[i, 0, ...], spad_rescaled,
                                        weights, sid_obj)
        # Calculate metrics
        gt = data["depth_cropped"]
        print(gt.shape)
        print(pred.shape)

        pred_metrics = get_depth_metrics(
            torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
            torch.ones_like(gt))
        if save_outputs:
            np.save(
                os.path.join(
                    output_dir,
                    "dorn_{}[{}]_{}_out.npy".format(dataset_type, entry,
                                                    hyper_string)), pred)
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rms', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(pred_metrics["delta1"], pred_metrics["delta2"],
                     pred_metrics["delta3"], pred_metrics["rel_abs_diff"],
                     pred_metrics["rms"], pred_metrics["log10"]))


def run_densedepth(dataset_type, spad_file, densedepth_depth_file,
                   hyper_string, sid_bins, alpha, beta, offset, lam, eps_rel,
                   n_std, entry, save_outputs, small_run, subsampling,
                   output_dir):
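    """Evaluate fixed-bin SID histogram matching on DenseDepth predictions.

    Like run_dorn above, but iterates over the dataset with a subsampling
    stride and removes the DC component with remove_dc_from_spad using fixed
    lam/eps_rel settings in the batch loop.
    """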
    # Load all the data:
    print("Loading SPAD data from {}".format(spad_file))
    spad_dict = np.load(spad_file, allow_pickle=True).item()
    spad_data = spad_dict["spad"]
    intensity_data = spad_dict["intensity"]
    spad_config = spad_dict["config"]
    print("Loading depth data from {}".format(densedepth_depth_file))
    depth_data = np.load(densedepth_depth_file)
    dataset = load_data(channels_first=True, dataset_type=dataset_type)

    # Read SPAD config and determine proper course of action
    dc_count = spad_config["dc_count"]
    use_intensity = spad_config["use_intensity"]
    use_squared_falloff = spad_config["use_squared_falloff"]
    use_poisson = spad_config["use_poisson"]
    min_depth = spad_config["min_depth"]
    max_depth = spad_config["max_depth"]

    print("dc_count: ", dc_count)
    print("use_intensity: ", use_intensity)
    print("use_squared_falloff:", use_squared_falloff)

    print("spad_data.shape", spad_data.shape)
    print("depth_data.shape", depth_data.shape)
    print("intensity_data.shape", intensity_data.shape)

    sid_obj = SID(sid_bins, alpha, beta, offset)

    if entry is None:
        metric_list = [
            "delta1", "delta2", "delta3", "rel_abs_diff", "rmse", "mse",
            "log10", "weight"
        ]
        print("Entries after subsampling: {}".format(
            len(dataset) // subsampling))
        metrics = np.zeros(
            (len(dataset) // subsampling + 1 if not small_run else small_run,
             len(metric_list)))
        entry_list = []
        outputs = []
        for i in range(depth_data.shape[0]):
            idx = i * subsampling

            if idx >= depth_data.shape[0] or (small_run and i >= small_run):
                break
            entry_list.append(idx)

            print("Evaluating {}[{}]".format(dataset_type, idx))
            spad = spad_data[idx, ...]
            # Rescale SPAD data onto the SID bins
            spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj)
            weights = np.ones_like(depth_data[idx, 0, ...])
            if use_intensity:
                weights = intensity_data[idx, 0, ...]
            if dc_count > 0.:
                spad_rescaled = remove_dc_from_spad(
                    spad_rescaled,
                    sid_obj.sid_bin_edges,
                    sid_obj.sid_bin_values[:-2]**2,
                    lam=1e-1,
                    eps_rel=1e-5)

            if use_squared_falloff:
                spad_rescaled = spad_rescaled * sid_obj.sid_bin_values[:-2]**2
            pred, _ = image_histogram_match(depth_data[idx, 0, ...],
                                            spad_rescaled, weights, sid_obj)
            # Calculate metrics
            gt = dataset[idx]["depth_cropped"].unsqueeze(0)

            pred_metrics = get_depth_metrics(
                torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
                torch.ones_like(gt))

            for j, metric_name in enumerate(metric_list[:-1]):
                metrics[i, j] = pred_metrics[metric_name]

            metrics[i, -1] = np.size(pred)
            # Option to save outputs:
            if save_outputs:
                outputs.append(pred)
            print("\tAvg RMSE = {}".format(
                np.mean(metrics[:i + 1, metric_list.index("rmse")])))

        if save_outputs:
            np.save(
                os.path.join(output_dir,
                             "densedepth_{}_outputs.npy".format(hyper_string)),
                np.array(outputs))

        # Save metrics using pandas
        metrics_df = pd.DataFrame(data=metrics,
                                  index=entry_list,
                                  columns=metric_list)
        metrics_df.to_pickle(path=os.path.join(
            output_dir, "densedepth_{}_metrics.pkl".format(hyper_string)))
        # Compute weighted averages:
        average_metrics = np.average(metrics_df.iloc[:, :-1],
                                     weights=metrics_df.weight,
                                     axis=0)
        average_df = pd.Series(data=average_metrics, index=metric_list[:-1])
        average_df.to_csv(os.path.join(
            output_dir, "densedepth_{}_avg_metrics.csv".format(hyper_string)),
                          header=True)
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(average_metrics[0], average_metrics[1],
                     average_metrics[2], average_metrics[3],
                     average_metrics[4], average_metrics[6]))

        print("wrote results to {} ({})".format(output_dir, hyper_string))

    else:
        input_unbatched = dataset.get_item_by_id(entry)
        from torch.utils.data._utils.collate import default_collate

        data = default_collate([input_unbatched])

        # Recover the dataset index and entry id from the collated batch
        entry = data["entry"][0]
        i = int(entry)
        entry = entry if isinstance(entry, str) else entry.item()
        print("Evaluating {}[{}]".format(dataset_type, i))
        # Rescale SPAD
        spad = spad_data[i, ...]
        spad_rescaled = rescale_bins(spad, min_depth, max_depth, sid_obj)
        print("spad_rescaled", spad_rescaled)
        weights = np.ones_like(depth_data[i, 0, ...])
        if use_intensity:
            weights = intensity_data[i, 0, ...]

        if dc_count > 0.:
            spad_rescaled = remove_dc_from_spad(
                spad_rescaled,
                sid_obj.sid_bin_edges,
                sid_obj.sid_bin_values[:-2]**2,
                lam=1e1 if use_poisson else 1e-1,
                eps_rel=1e-5)
        if use_squared_falloff:
            spad_rescaled = spad_rescaled * sid_obj.sid_bin_values[:-2]**2
        pred, _ = image_histogram_match(depth_data[i, 0, ...], spad_rescaled,
                                        weights, sid_obj)
        # Calculate metrics
        gt = data["depth_cropped"]
        print(gt.shape)
        print(pred.shape)
        print(gt[:, :, 40, 60])
        print(depth_data[i, 0, 40, 60])
        print("before rmse: ",
              np.sqrt(np.mean((gt.numpy() - depth_data[i, 0, ...])**2)))

        before_metrics = get_depth_metrics(
            torch.from_numpy(
                depth_data[i, 0, ...]).unsqueeze(0).unsqueeze(0).float(), gt,
            torch.ones_like(gt))
        pred_metrics = get_depth_metrics(
            torch.from_numpy(pred).unsqueeze(0).unsqueeze(0).float(), gt,
            torch.ones_like(gt))
        if save_outputs:
            np.save(
                os.path.join(
                    output_dir, "densedepth_{}[{}]_{}_out.npy".format(
                        dataset_type, entry, hyper_string)), pred)

        print("before:")
        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(before_metrics["delta1"], before_metrics["delta2"],
                     before_metrics["delta3"], before_metrics["rel_abs_diff"],
                     before_metrics["rmse"], before_metrics["log10"]))
        print("after:")

        print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format(
            'd1', 'd2', 'd3', 'rel', 'rmse', 'log_10'))
        print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".
              format(pred_metrics["delta1"], pred_metrics["delta2"],
                     pred_metrics["delta3"], pred_metrics["rel_abs_diff"],
                     pred_metrics["rmse"], pred_metrics["log10"]))


def analyze(data_dir, calibration_file, scenes, offsets, output_dir,
            bin_width_ps, bin_width_m, min_depth_bin, max_depth_bin, min_depth,
            max_depth, sid_obj, ambient_max_depth_bin, device):
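    """Run the captured-data pipeline on each scene.

    Loads SPAD and Kinect captures, recovers a ground-truth depth map from
    the SPAD returns, projects it into the (cropped) Kinect frame,
    initializes depth with DenseDepth, refines it by histogram matching and
    by median matching against the SPAD histogram, and saves figures and
    error metrics per scene.
    """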
    model = DenseDepth()
    fc_kinect, fc_spad, pc_kinect, pc_spad, rdc_kinect, rdc_spad, tdc_kinect, tdc_spad, \
        RotationOfSpad, TranslationOfSpad = extract_camera_params(calibration_file)
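    # Invert the SPAD->Kinect extrinsics: R_kinect = R_spad^T and
    # t_kinect = -t_spad @ R_spad^T.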
    RotationOfKinect = RotationOfSpad.T
    TranslationOfKinect = -TranslationOfSpad.dot(RotationOfSpad.T)

    for scene, offset in zip(scenes, offsets):
        print("Running {}...".format(scene))
        rootdir = os.path.join(data_dir, scene)
        scenedir = os.path.join(output_dir, scene)

        safe_makedir(os.path.join(scenedir))
        # Load all the SPAD and kinect data
        spad = load_spad(os.path.join(rootdir, "spad", "data_accum.mat"))
        spad_relevant = spad[..., min_depth_bin:max_depth_bin]
        spad_single_relevant = np.sum(spad_relevant, axis=(0, 1))
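        # Estimate the ambient (background) rate from the early bins, which
        # lie before the first laser return.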
        ambient_estimate = np.mean(
            spad_single_relevant[:ambient_max_depth_bin])

        # Get ground truth depth
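        # The per-pixel argmax over time bins gives the strongest-return bin;
        # convert to metric range, reorient to image coordinates, and
        # median-filter to suppress speckle.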
        gt_idx = np.argmax(spad, axis=2)
        gt_r = signal.medfilt(np.fliplr(np.flipud((gt_idx * bin_width_m).T)),
                              kernel_size=5)
        mask = (gt_r >= min_depth).astype('float').squeeze()
        gt_z = r_to_z(gt_r, fc_spad)
        gt_z = undistort_img(gt_z, fc_spad, pc_spad, rdc_spad, tdc_spad)
        mask = np.round(
            undistort_img(mask, fc_spad, pc_spad, rdc_spad, tdc_spad))
        # Nearest neighbor upsampling to reduce holes in output
        scale_factor = 2
        # cv2.resize takes dsize as (width, height)
        gt_z_up = cv2.resize(gt_z,
                             dsize=(scale_factor * gt_z.shape[1],
                                    scale_factor * gt_z.shape[0]),
                             interpolation=cv2.INTER_NEAREST)
        mask_up = cv2.resize(mask,
                             dsize=(scale_factor * mask.shape[1],
                                    scale_factor * mask.shape[0]),
                             interpolation=cv2.INTER_NEAREST)

        # Get RGB and intensity
        rgb, rgb_cropped, intensity, crop = load_and_crop_kinect(rootdir)

        # Project GT depth and mask to RGB image coordinates and crop it.
        gt_z_proj, mask_proj = project_depth(gt_z_up, mask_up,
                                             (rgb.shape[0], rgb.shape[1]),
                                             fc_spad * scale_factor, fc_kinect,
                                             pc_spad * scale_factor, pc_kinect,
                                             RotationOfKinect,
                                             TranslationOfKinect / 1e3)
        gt_z_proj_crop = gt_z_proj[crop[0] + offset[0]:crop[1] + offset[0],
                                   crop[2] + offset[1]:crop[3] + offset[1]]
        gt_z_proj_crop = signal.medfilt(gt_z_proj_crop, kernel_size=5)
        # mask_proj_crop = mask_proj[crop[0]+offset[0]:crop[1]+offset[0],
        #                            crop[2]+offset[1]:crop[3]+offset[1]]
        mask_proj_crop = (gt_z_proj_crop >=
                          min_depth).astype('float').squeeze()

        # Process SPAD
        spad_sid = preprocess_spad(spad_single_relevant, ambient_estimate,
                                   min_depth, max_depth, sid_obj)

        # Initialize with CNN
        z_init = model.predict(rgb_cropped).squeeze()
        r_init = z_to_r(z_init, fc_kinect)

        # Histogram Match
        weights = intensity
        r_pred, t = image_histogram_match(r_init, spad_sid, weights, sid_obj)
        z_pred = r_to_z(r_pred, fc_kinect)

        # Median match: rescale the initial prediction so that its median
        # range matches the median of the SPAD histogram.
        med_bin = get_hist_med(spad_sid)
        hist_med = sid_obj.sid_bin_values[med_bin.astype('int')]
        r_med_scaled = np.clip(r_init * hist_med / np.median(r_init),
                               a_min=min_depth,
                               a_max=max_depth)
        z_med_scaled = r_to_z(r_med_scaled, fc_kinect)

        # Find min and max depth across r and z separately
        min_r = min(np.min(a) for a in [gt_r, r_init, r_pred, r_med_scaled])
        max_r = max(np.max(a) for a in [gt_r, r_init, r_pred, r_med_scaled])
        min_z = min(
            np.min(a) for a in
            [gt_z, z_init, z_pred, z_med_scaled, gt_z_proj, gt_z_proj_crop])
        max_z = max(
            np.max(a) for a in
            [gt_z, z_init, z_pred, z_med_scaled, gt_z_proj, gt_z_proj_crop])
        mins_and_maxes = {
            "min_r": min_r,
            "max_r": max_r,
            "min_z": min_z,
            "max_z": max_z
        }
        np.save(os.path.join(scenedir, "mins_and_maxes.npy"), mins_and_maxes)

        # Save to figures
        print("Saving figures...")
        # spad_single_relevant w/ ambient estimate
        plt.figure()
        plt.bar(range(len(spad_single_relevant)),
                spad_single_relevant,
                log=True)
        plt.title("spad_single_relevant".format(scene))
        plt.axhline(y=ambient_estimate, color='r', linewidth=0.5)
        plt.tight_layout()
        plt.savefig(os.path.join(scenedir, "spad_single_relevant.pdf"))
        # gt_r and gt_z and gt_z_proj and gt_z_proj_crop and masks
        depth_imwrite(gt_r, min_r, max_r, os.path.join(scenedir, "gt_r"))
        depth_imwrite(gt_z, min_z, max_z, os.path.join(scenedir, "gt_z"))
        depth_imwrite(gt_z_proj, min_z, max_z,
                      os.path.join(scenedir, "gt_z_proj"))
        depth_imwrite(gt_z_proj_crop, min_z, max_z,
                      os.path.join(scenedir, "gt_z_proj_crop"))
        depth_imwrite(mask, 0., 1., os.path.join(scenedir, "mask"))
        depth_imwrite(mask_proj, 0., 1., os.path.join(scenedir, "mask_proj"))
        depth_imwrite(mask_proj_crop, 0., 1.,
                      os.path.join(scenedir, "mask_proj_crop"))
        depth_imwrite(intensity, 0., 1., os.path.join(scenedir, "intensity"))
        np.save(os.path.join(scenedir, "crop.npy"), crop)
        # spad_sid after preprocessing
        plt.figure()
        plt.bar(range(len(spad_sid)), spad_sid, log=True)
        plt.title("spad_sid")
        plt.tight_layout()
        plt.savefig(os.path.join(scenedir, "spad_sid.pdf"))
        # rgb, rgb_cropped, intensity
        cv2.imwrite(os.path.join(scenedir, "rgb.png"),
                    cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))
        cv2.imwrite(os.path.join(scenedir, "rgb_cropped.png"),
                    cv2.cvtColor(rgb_cropped, cv2.COLOR_RGB2BGR))
        # r_init, z_init, diff_maps
        depth_imwrite(r_init, min_r, max_r, os.path.join(scenedir, "r_init"))
        depth_imwrite(z_init, min_z, max_z, os.path.join(scenedir, "z_init"))
        # r_pred, z_pred, diff_maps
        depth_imwrite(r_pred, min_r, max_r, os.path.join(scenedir, "r_pred"))
        depth_imwrite(z_pred, min_z, max_z, os.path.join(scenedir, "z_pred"))
        # r_med_scaled, z_med_scaled, diff_maps
        depth_imwrite(r_med_scaled, min_r, max_r,
                      os.path.join(scenedir, "r_med_scaled"))
        depth_imwrite(z_med_scaled, min_z, max_z,
                      os.path.join(scenedir, "z_med_scaled"))
        plt.close('all')

        # Compute metrics
        print("Computing error metrics...")
        # z_init
        init_metrics = get_depth_metrics(
            torch.from_numpy(z_init).float(),
            torch.from_numpy(gt_z_proj_crop).float(),
            torch.from_numpy(mask_proj_crop).float())
        np.save(os.path.join(scenedir, "init_metrics.npy"), init_metrics)
        # z_pred
        pred_metrics = get_depth_metrics(
            torch.from_numpy(z_pred).float(),
            torch.from_numpy(gt_z_proj_crop).float(),
            torch.from_numpy(mask_proj_crop).float())
        np.save(os.path.join(scenedir, "pred_metrics.npy"), pred_metrics)

        # z_med_scaled
        med_scaled_metrics = get_depth_metrics(
            torch.from_numpy(z_med_scaled).float(),
            torch.from_numpy(gt_z_proj_crop).float(),
            torch.from_numpy(mask_proj_crop).float())
        np.save(os.path.join(scenedir, "med_scaled_metrics.npy"),
                med_scaled_metrics)