    def test_cpp_iteration_data(self):
        optimizer = ho_cpp.HierarchicalOptimizer2d(
            tikhonov_term_enabled=False,
            gradient_kernel_enabled=False,
            maximum_chunk_size=8,
            rate=0.2,
            maximum_iteration_count=100,
            maximum_warp_update_threshold=0.001,
            data_term_amplifier=1.0,
            tikhonov_strength=0.0,
            kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1),
            resampling_strategy=(ho_cpp.HierarchicalOptimizer2d
                                 .ResamplingStrategy.NEAREST_AND_AVERAGE),
            verbosity_parameters=ho_cpp.HierarchicalOptimizer2d.VerbosityParameters(),
            logging_parameters=ho_cpp.HierarchicalOptimizer2d.LoggingParameters(
                collect_per_level_convergence_reports=True,
                collect_per_level_iteration_data=True))
        warp_field_out = optimizer.optimize(test_data.canonical_field,
                                            test_data.live_field)
        final_warped_live = resampling.warp_field(test_data.live_field,
                                                  warp_field_out)
        data = optimizer.get_per_level_iteration_data()
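        # data[3]: iteration data recorded at pyramid level index 3; each entry
        # of the returned list is the warp field after one iteration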
        vec = data[3].get_warp_fields()

        self.assertTrue(
            np.allclose(vec[50], test_data.iteration50_warp_field, atol=1e-6))

        self.assertTrue(
            np.allclose(warp_field_out, test_data.warp_field, atol=1e-5))
        self.assertTrue(
            np.allclose(final_warped_live,
                        test_data.final_live_field,
                        atol=1e-5))
Example #2
    def __init__(self,
                 tikhonov_term_enabled=False,
                 gradient_kernel_enabled=False,
                 maximum_chunk_size=8,
                 rate=0.2,
                 maximum_iteration_count=100,
                 maximum_warp_update_threshold=0.1,
                 data_term_amplifier=1.0,
                 tikhonov_strength=0.0,
                 kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1)):
        self.tikhonov_term_enabled = tikhonov_term_enabled
        self.gradient_kernel_enabled = gradient_kernel_enabled

        self.maximum_chunk_size = maximum_chunk_size
        self.rate = rate
        self.maximum_iteration_count = maximum_iteration_count
        self.maximum_warp_update_threshold = maximum_warp_update_threshold

        self.data_term_amplifier = data_term_amplifier
        self.tikhonov_strength = tikhonov_strength
        self.kernel = kernel
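
    # Note: the kernel default in the signature above is evaluated once, when
    # the def statement runs, so every instance constructed without an explicit
    # kernel shares the same ndarray object. A hypothetical variant that builds
    # a fresh kernel per instance (not the original code):
    #
    #     def __init__(self, ..., kernel=None):
    #         self.kernel = (kernel if kernel is not None else
    #                        sob.generate_1d_sobolev_kernel(size=7, strength=0.1))
Example #3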
def make_optimizer(compute_method, field_size, max_iterations=1):
    view_scaling_factor = 1024 // field_size
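    # (view_scaling_factor scales the rendered view to roughly 1024 px)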
    optimizer = SlavchevaOptimizer2d(
        out_path="output/test_non_rigid_out",
        field_size=field_size,
        default_value=1,
        compute_method=compute_method,
        level_set_term_enabled=False,
        sobolev_smoothing_enabled=True,
        data_term_method=DataTermMethod.BASIC,
        smoothing_term_method=SmoothingTermMethod.TIKHONOV,
        adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE,
        data_term_weight=1.0,
        smoothing_term_weight=0.2,
        isomorphic_enforcement_factor=0.1,
        level_set_term_weight=0.2,
        maximum_warp_length_lower_threshold=0.05,
        max_iterations=max_iterations,
        sobolev_kernel=generate_1d_sobolev_kernel(size=3, strength=0.1),
        visualization_settings=SlavchevaVisualizer.Settings(
            enable_component_fields=True,
            view_scaling_factor=view_scaling_factor),
        enable_convergence_status_logging=True)
    return optimizer
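
# Hypothetical usage of the factory above (ComputeMethod is referenced the same
# way in the final example on this page):
#
#     optimizer = make_optimizer(ComputeMethod.VECTORIZED, field_size=128,
#                                max_iterations=50)
#     optimizer.optimize(live_field, canonical_field)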
Example #4
def main():
    visualize_and_save_initial_and_final_fields = False
    field_size = 128
    default_value = 0
    view_scaling_factor = 8

    live_field, canonical_field, warp_field = generate_initial_fields(
        field_size=field_size,
        live_smoothing_kernel_size=0,
        canonical_smoothing_kernel_size=0,
        default_value=default_value)

    start_from_run = 0
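    # runs before start_from_run are skipped in the loop below, which allows
    # resuming an interrupted sweep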

    data_term_weights = [0.2, 0.3, 0.6]
    smoothing_term_weights = [0.1, 0.2, 0.3]
    sobolev_kernel_sizes = [3, 7, 9]
    sobolev_kernel_strengths = [0.1, 0.15]

    total_number_of_runs = len(data_term_weights) * len(smoothing_term_weights) * \
                           len(sobolev_kernel_sizes) * len(sobolev_kernel_strengths)

    end_before_run = total_number_of_runs
    current_run = 0

    max_iterations = 100
    maximum_warp_length_lower_threshold = 0.1

    for data_term_weight in data_term_weights:
        for smoothing_term_weight in smoothing_term_weights:
            for sobolev_kernel_size in sobolev_kernel_sizes:
                for sobolev_kernel_strength in sobolev_kernel_strengths:
                    if current_run < start_from_run:
                        current_run += 1
                        continue

                    if current_run >= end_before_run:
                        current_run += 1
                        continue

                    print("{:s}STARTING RUN {:0>6d}{:s}".format(
                        BOLD_LIGHT_CYAN, current_run, RESET))

                    input_parameters = {
                        "data_term_weight": float(data_term_weight),
                        "smoothing_term_weight": float(smoothing_term_weight),
                        "sobolev_kernel_size": int(sobolev_kernel_size),
                        "sobolev_kernel_strength": float(sobolev_kernel_strength),
                        "max_iterations": max_iterations,
                        "maximum_warp_length_lower_threshold":
                            maximum_warp_length_lower_threshold
                    }
                    print("Input Parameters:")
                    print(
                        json.dumps(input_parameters, sort_keys=True, indent=4))
                    out_path = os.path.join(
                        "/media/algomorph/Data/Reconstruction/out_2D_SobolevFusionTuning",
                        "run{:0>6d}".format(current_run))
                    if not os.path.exists(out_path):
                        os.makedirs(out_path)
                    with open(os.path.join(out_path, "input_parameters.yaml"),
                              'w') as yaml_file:
                        yaml.dump(input_parameters,
                                  yaml_file,
                                  default_flow_style=False)

                    live_field_copy = live_field.copy()
                    canonical_field_copy = canonical_field.copy()
                    warp_field_copy = warp_field.copy()

                    optimizer = SlavchevaOptimizer2d(
                        out_path=out_path,
                        field_size=field_size,
                        data_term_weight=data_term_weight,
                        smoothing_term_weight=smoothing_term_weight,
                        level_set_term_weight=0.5,
                        sobolev_kernel=generate_1d_sobolev_kernel(
                            size=sobolev_kernel_size,
                            strength=sobolev_kernel_strength),
                        level_set_term_enabled=False,
                        maximum_warp_length_lower_threshold=maximum_warp_length_lower_threshold,
                        max_iterations=max_iterations,
                        adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE,
                        default_value=default_value,
                        enable_component_fields=True,
                        view_scaling_factor=view_scaling_factor)

                    optimizer.optimize(live_field_copy, canonical_field_copy,
                                       warp_field_copy)
                    optimizer.plot_logged_sdf_and_warp_magnitudes()
                    optimizer.plot_logged_energies_and_max_warps()

                    # measure the difference using the optimized copies (the
                    # originals are deliberately left untouched between runs)
                    sdf_diff = float(
                        np.sum((live_field_copy - canonical_field_copy)**2))

                    output_results = {
                        "sdf_diff": sdf_diff,
                        "iterations": len(optimizer.log.max_warps),
                        "final_max_warp_length": float(optimizer.log.max_warps[-1]),
                        "initial_data_energy": float(optimizer.log.data_energies[0]),
                        "final_data_energy": float(optimizer.log.data_energies[-1]),
                        "initial_energy": float(optimizer.log.data_energies[0] +
                                                optimizer.log.smoothing_energies[0]),
                        "final_energy": float(optimizer.log.data_energies[-1] +
                                              optimizer.log.smoothing_energies[-1]),
                        "initial_smoothing_energy": float(optimizer.log.smoothing_energies[0]),
                        "final_smoothing_energy": float(optimizer.log.smoothing_energies[-1])
                    }
                    print("Tuning Results:")
                    print(json.dumps(output_results, sort_keys=True, indent=4))
                    with open(os.path.join(out_path, "results.yaml"),
                              'w') as yaml_file:
                        yaml.dump(output_results,
                                  yaml_file,
                                  default_flow_style=False)

                    touch_path = os.path.join(
                        out_path, "ran_for_{:0>4d}_iterations".format(
                            len(optimizer.log.max_warps)))
                    with open(touch_path, 'a'):
                        os.utime(touch_path)

                    touch_path = os.path.join(
                        out_path, "sdf_diff_{:3.2f}".format(sdf_diff))
                    with open(touch_path, 'a'):
                        os.utime(touch_path)

                    print("{:s}FINISHED RUN {:0>6d}{:s}".format(
                        BOLD_LIGHT_CYAN, current_run, RESET))
                    current_run += 1

    return EXIT_CODE_SUCCESS
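
# The four nested loops above walk the Cartesian product of the parameter
# lists; the same enumeration can be expressed with itertools.product (a
# hypothetical refactoring sketch, not part of the original script):
import itertools

def enumerate_tuning_runs(data_term_weights, smoothing_term_weights,
                          sobolev_kernel_sizes, sobolev_kernel_strengths,
                          start_from_run=0, end_before_run=None):
    # yields (run_index, (data_weight, smoothing_weight, kernel_size,
    # kernel_strength)) for runs inside [start_from_run, end_before_run)
    grid = itertools.product(data_term_weights, smoothing_term_weights,
                             sobolev_kernel_sizes, sobolev_kernel_strengths)
    for run_index, parameters in enumerate(grid):
        if run_index < start_from_run:
            continue
        if end_before_run is not None and run_index >= end_before_run:
            break
        yield run_index, parameters
Example #5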
def main():
    args = process_arguments(
        Arguments,
        "Runs 2D hierarchical optimizer on TSDF inputs generated from frame-pairs "
        "& random pixel rows from these. Alternatively, generates the said data or "
        "loads it from a folder from further re-use.")
    post_process_enum_args(args, for_3d=True)
    perform_optimization = not Arguments.skip_optimization.v

    filter_method_name_substring, filter_smoothing_substring = get_filter_substrings()
    data_subfolder = "tsdf_pairs_128_{:s}{:s}_{:02d}".format(
        filter_method_name_substring, filter_smoothing_substring,
        Arguments.dataset_number.v)
    data_path = os.path.join(pu.get_reconstruction_data_directory(),
                             "real_data/snoopy", data_subfolder)
    experiment_name = build_experiment_name(filter_method_name_substring,
                                            filter_smoothing_substring)

    print("Running experiment " + experiment_name)

    if Arguments.series_result_subfolder.v is None:
        out_path = os.path.join(args.output_path, experiment_name)
    else:
        out_path = os.path.join(args.output_path,
                                Arguments.series_result_subfolder.v,
                                experiment_name)

    convergence_reports_pickle_path = os.path.join(out_path,
                                                   "convergence_reports.pk")

    df = None
    if not args.analyze_only:
        create_folder_if_necessary(out_path)
        if args.generate_data:
            create_or_clear_folder(data_path)
        initial_fields = []
        frame_numbers_and_rows = []
        if args.generate_data:

            datasets = esr.prepare_datasets_for_2d_frame_pair_processing(
                calibration_path=os.path.join(
                    pu.get_reconstruction_data_directory(),
                    "real_data/snoopy/snoopy_calib.txt"),
                frame_directory=os.path.join(
                    pu.get_reconstruction_data_directory(),
                    "real_data/snoopy/frames"),
                output_directory=out_path,
                y_range=(214, 400),
                replace_empty_rows=True,
                use_masks=True,
                input_case_file=Arguments.generation_case_file.v,
                offset=np.array([-64, -64, 128]),
                field_size=128,
            )

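            # keep only the requested [start_from_index, stop_before_index) window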
            datasets = datasets[args.start_from_index:min(
                len(datasets), args.stop_before_index)]

            print("Generating initial fields...")
            initial_fields_folder = os.path.join(data_path, "images")
            if args.save_initial_fields_during_generation:
                create_folder_if_necessary(initial_fields_folder)

            for dataset in progressbar.progressbar(datasets):
                canonical_field, live_field = dataset.generate_3d_sdf_fields(
                    args.generation_method, args.smoothing_coefficient)
                initial_fields.append((canonical_field, live_field))
                if args.generate_data:
                    canonical_frame = infer_frame_number_from_filename(
                        dataset.first_frame_path)
                    pixel_row = dataset.image_pixel_row
                    frame_numbers_and_rows.append((canonical_frame, pixel_row))
                    np.savez(os.path.join(
                        data_path,
                        "data_{:d}_{:d}".format(canonical_frame, pixel_row)),
                             canonical=canonical_field,
                             live=live_field)
                    if args.save_initial_fields_during_generation:
                        live_frame = canonical_frame + 1
                        canonical_image_path = os.path.join(
                            initial_fields_folder,
                            "tsdf_frame_{:06d}.png".format(canonical_frame))
                        viz.save_field(canonical_field, canonical_image_path,
                                       1024 // dataset.field_size)
                        live_image_path = os.path.join(
                            initial_fields_folder,
                            "tsdf_frame_{:06d}.png".format(live_frame))
                        viz.save_field(live_field, live_image_path,
                                       1024 // dataset.field_size)

                sys.stdout.flush()
        else:
            files = os.listdir(data_path)
            files.sort()
            if files and files[-1] == "images":  # drop the "images" subfolder from the data-file list
                files = files[:-1]
            print("Loading initial fields from {:s}...".format(data_path))
            for file in files:
                frame_numbers_and_rows.append(
                    infer_frame_number_and_pixel_row_from_filename(file))
            if Arguments.optimization_case_file.v is not None:
                files, frame_numbers_and_rows = \
                    filter_files_based_on_case_file(Arguments.optimization_case_file.v, frame_numbers_and_rows, files)
            for file in progressbar.progressbar(files):
                archive = np.load(os.path.join(data_path, file))
                initial_fields.append((archive["canonical"], archive["live"]))

        # limit ranges
        frame_numbers_and_rows = frame_numbers_and_rows[
            args.start_from_index:min(len(frame_numbers_and_rows),
                                      args.stop_before_index)]
        initial_fields = initial_fields[args.start_from_index:min(
            len(initial_fields), args.stop_before_index)]

        telemetry_logs = []
        telemetry_folder = os.path.join(out_path, "telemetry")
        if perform_optimization:

            optimizer = cpp_module.HierarchicalOptimizer3d(
                tikhonov_term_enabled=Arguments.tikhonov_term_enabled.v,
                gradient_kernel_enabled=Arguments.gradient_kernel_enabled.v,
                maximum_chunk_size=8,
                rate=Arguments.rate.v,
                maximum_iteration_count=Arguments.maximum_iteration_count.v,
                maximum_warp_update_threshold=Arguments.maximum_warp_update_threshold.v,
                data_term_amplifier=Arguments.data_term_amplifier.v,
                tikhonov_strength=Arguments.tikhonov_strength.v,
                kernel=sob.generate_1d_sobolev_kernel(
                    Arguments.kernel_size.v, Arguments.kernel_strength.v),
                resampling_strategy=Arguments.resampling_strategy.v,
                verbosity_parameters=cpp_module.HierarchicalOptimizer3d.VerbosityParameters(),
                logging_parameters=cpp_module.HierarchicalOptimizer3d.LoggingParameters(
                    collect_per_level_convergence_reports=True,
                    collect_per_level_iteration_data=Arguments.save_telemetry.v))

            convergence_report_sets = []
            if Arguments.save_initial_and_final_fields.v or Arguments.save_telemetry.v:
                create_folder_if_necessary(telemetry_folder)

            if args.save_telemetry:
                # make all the necessary subfolders
                for frame_number, pixel_row in frame_numbers_and_rows:
                    telemetry_subfolder = get_telemetry_subfolder_path(
                        telemetry_folder, frame_number, pixel_row)
                    create_folder_if_necessary(telemetry_subfolder)

            print("Optimizing...")
            i_pair = 0
            for (canonical_field,
                 live_field) in progressbar.progressbar(initial_fields):
                (frame_number, pixel_row) = frame_numbers_and_rows[i_pair]
                live_copy = live_field.copy()
                warp_field_out = optimizer.optimize(canonical_field,
                                                    live_field)

                if args.save_telemetry:
                    if args.implementation_language == build_opt.ImplementationLanguage.CPP:
                        telemetry_logs.append(
                            optimizer.get_per_level_iteration_data())
                    else:
                        optimizer.visualization_parameters.out_path = \
                            get_telemetry_subfolder_path(telemetry_folder, frame_number, pixel_row)
                if Arguments.save_initial_and_final_fields.v:
                    if not Arguments.save_telemetry.v:
                        frame_file_prefix = "pair_{:d}-{:d}_{:d}".format(
                            frame_number, frame_number + 1, pixel_row)
                        final_live_path = os.path.join(
                            telemetry_folder,
                            frame_file_prefix + "_final_live.png")
                        canonical_path = os.path.join(
                            telemetry_folder,
                            frame_file_prefix + "_canonical.png")
                        initial_live_path = os.path.join(
                            telemetry_folder,
                            frame_file_prefix + "_initial_live.png")
                    else:
                        telemetry_subfolder = get_telemetry_subfolder_path(
                            telemetry_folder, frame_number, pixel_row)
                        final_live_path = os.path.join(telemetry_subfolder,
                                                       "final_live.png")
                        canonical_path = os.path.join(telemetry_subfolder,
                                                      "canonical.png")
                        initial_live_path = os.path.join(
                            telemetry_subfolder, "live.png")
                    final_live_resampled = resampling.warp_field(
                        live_field, warp_field_out)
                    scale = 1024 // final_live_resampled.shape[0]
                    viz.save_field(final_live_resampled, final_live_path,
                                   scale)
                    viz.save_field(canonical_field, canonical_path, scale)
                    viz.save_field(live_copy, initial_live_path, scale)

                convergence_reports = optimizer.get_per_level_convergence_reports()
                convergence_report_sets.append(convergence_reports)
                i_pair += 1

            print("Post-processing convergence reports...")
            df = post_process_convergence_report_sets(convergence_report_sets,
                                                      frame_numbers_and_rows)
            reports_file_name = "convergence_reports"
            if Arguments.optimization_case_file.v is not None:
                reports_file_name = "case_convergence_reports"
            df.to_excel(
                os.path.join(out_path, "{:s}.xlsx".format(reports_file_name)))
            df.to_pickle(
                os.path.join(out_path, "{:s}.pk".format(reports_file_name)))

        if Arguments.save_telemetry.v and \
                Arguments.implementation_language.v == build_opt.ImplementationLanguage.CPP and \
                len(telemetry_logs) > 0:
            print("Saving C++-based telemetry (" + telemetry_folder + ")...")
            i_pair = 0
            telemetry_metadata = ho_viz.get_telemetry_metadata(
                telemetry_logs[0])
            for telemetry_log in progressbar.progressbar(telemetry_logs):
                (frame_number, pixel_row) = frame_numbers_and_rows[i_pair]
                telemetry_subfolder = get_telemetry_subfolder_path(
                    telemetry_folder, frame_number, pixel_row)
                ho_viz.save_telemetry_log(telemetry_log, telemetry_metadata,
                                          telemetry_subfolder)
                i_pair += 1

        if Arguments.convert_telemetry.v and \
                Arguments.implementation_language.v == build_opt.ImplementationLanguage.CPP:
            # TODO: attempt to load telemetry if the array is empty
            if len(telemetry_logs) == 0:
                print("Loading C++-based telemetry (" + telemetry_folder +
                      ")...")
                for frame_number, pixel_row in progressbar.progressbar(
                        frame_numbers_and_rows):
                    telemetry_subfolder = get_telemetry_subfolder_path(
                        telemetry_folder, frame_number, pixel_row)
                    telemetry_log = ho_viz.load_telemetry_log(
                        telemetry_subfolder)
                    telemetry_logs.append(telemetry_log)

            print("Converting C++-based telemetry to videos (" +
                  telemetry_folder + ")...")
            i_pair = 0
            total_frame_count = ho_viz.get_number_of_frames_to_save_from_telemetry_logs(
                telemetry_logs)
            bar = progressbar.ProgressBar(max_value=total_frame_count)
            telemetry_metadata = ho_viz.get_telemetry_metadata(
                telemetry_logs[0])
            for telemetry_log in telemetry_logs:
                canonical_field, live_field = initial_fields[i_pair]
                (frame_number, pixel_row) = frame_numbers_and_rows[i_pair]
                telemetry_subfolder = get_telemetry_subfolder_path(
                    telemetry_folder, frame_number, pixel_row)
                ho_viz.convert_cpp_telemetry_logs_to_video(
                    telemetry_log, telemetry_metadata, canonical_field,
                    live_field, telemetry_subfolder, bar)
                i_pair += 1

    else:
        df = pd.read_pickle(convergence_reports_pickle_path)

    if df is not None:
        analyze_convergence_data(df, out_path)
        if not Arguments.optimization_case_file.v:
            save_bad_cases(df, out_path)
            save_all_cases(df, out_path)

    print()

    return EXIT_CODE_SUCCESS
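
# get_telemetry_subfolder_path is used above but not defined in this snippet;
# a minimal sketch of what such a helper might look like (hypothetical; the
# folder naming scheme here is an assumption):
import os

def get_telemetry_subfolder_path_sketch(telemetry_folder, frame_number, pixel_row):
    # one telemetry subfolder per (canonical frame, pixel row) pair
    return os.path.join(telemetry_folder,
                        "frame_{:06d}_row_{:03d}".format(frame_number, pixel_row))
Example #6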
def main():
    data_to_use = ds.PredefinedDatasetEnum.REAL3D_SNOOPY_SET05
    tsdf_generation_method = cpp.tsdf.FilteringMethod.NONE

    out_path = "output/ho3d/single"
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    generate_test_data = False

    live_field, canonical_field = \
        ds.datasets[data_to_use].generate_3d_sdf_fields(method=tsdf_generation_method, smoothing_coefficient=0.5)

    view_scaling_factor = 1024 // ds.datasets[data_to_use].field_size

    if generate_test_data:
        live_field = live_field[36:52, 21:37].copy()
        canonical_field = canonical_field[36:52, 21:37].copy()

    maximum_warp_update_threshold = 0.01
    maximum_iteration_count = 100

    verbosity_parameters_cpp = cpp.HierarchicalOptimizer3d.VerbosityParameters(
        print_max_warp_update=True,
        print_iteration_mean_tsdf_difference=True,
        print_iteration_std_tsdf_difference=True,
        print_iteration_data_energy=True,
        print_iteration_tikhonov_energy=True,
    )

    logging_parameters_cpp = cpp.HierarchicalOptimizer3d.LoggingParameters(
        collect_per_level_convergence_reports=True,
        collect_per_level_iteration_data=False)
    resampling_strategy_cpp = cpp.HierarchicalOptimizer3d.ResamplingStrategy.NEAREST_AND_AVERAGE
    # resampling_strategy_cpp = ho_cpp.HierarchicalOptimizer3d.ResamplingStrategy.LINEAR

    optimizer = cpp.HierarchicalOptimizer3d(
        tikhonov_term_enabled=False,
        gradient_kernel_enabled=False,
        maximum_chunk_size=8,
        rate=0.1,
        maximum_iteration_count=maximum_iteration_count,
        maximum_warp_update_threshold=maximum_warp_update_threshold,
        data_term_amplifier=1.0,
        tikhonov_strength=0.0,
        kernel=sob.generate_1d_sobolev_kernel(size=7, strength=0.1),
        resampling_strategy=resampling_strategy_cpp,
        verbosity_parameters=verbosity_parameters_cpp,
        logging_parameters=logging_parameters_cpp)

    warp_field = optimizer.optimize(canonical_field, live_field)
    print("Warp [min, mean, max]:", warp_field.min(), warp_field.mean(),
          warp_field.max())

    print(
        "==================================================================================="
    )
    print_convergence_reports(optimizer.get_per_level_convergence_reports())
    # telemetry_log = optimizer.get_per_level_iteration_data()
    # metadata = viz_ho.get_telemetry_metadata(telemetry_log)
    # frame_count = viz_ho.get_number_of_frames_to_save_from_telemetry_logs([telemetry_log])
    # progress_bar = progressbar.ProgressBar(max_value=frame_count)
    # viz_ho.convert_cpp_telemetry_logs_to_video(telemetry_log, metadata, canonical_field, live_field, out_path,
    #                                            progress_bar=progress_bar)
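    # (enabling the telemetry block above requires constructing LoggingParameters
    # with collect_per_level_iteration_data=True earlier in this function)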

    # warped_live = resampling.warp_field(live_field, warp_field)

    return EXIT_CODE_SUCCESS
Example #7
def perform_single_test(depth_interpolation_method=GenerationMethod.BASIC,
                        out_path="output/out2D",
                        frame_path="",
                        calibration_path="calib.txt",
                        canonical_frame_index=-1,
                        pixel_row_index=-1,
                        z_offset=128,
                        draw_tsdfs_and_exit=False):
    visualize_and_save_initial_and_final_fields = False
    field_size = 128
    default_value = 1

    if pixel_row_index < 0 and canonical_frame_index < 0:
        data_to_use = PredefinedDatasetEnum.REAL3D_SNOOPY_SET04

        if data_to_use == PredefinedDatasetEnum.GENEREATED2D:
            live_field, canonical_field = \
                generate_initial_orthographic_2d_tsdf_fields(field_size=field_size,
                                                             live_smoothing_kernel_size=0,
                                                             canonical_smoothing_kernel_size=0,
                                                             default_value=default_value)
        else:
            live_field, canonical_field = \
                datasets[data_to_use].generate_2d_sdf_fields(method=depth_interpolation_method)
            field_size = datasets[data_to_use].field_size
    else:
        frame_count, frame_filename_format, use_masks = shared.check_frame_count_and_format(
            frame_path)
        if frame_filename_format == shared.FrameFilenameFormat.SIX_DIGIT:
            frame_path_format_string = frame_path + os.path.sep + "depth_{:0>6d}.png"
            mask_path_format_string = frame_path + os.path.sep + "mask_{:0>6d}.png"
        else:  # has to be FIVE_DIGIT
            frame_path_format_string = frame_path + os.path.sep + "depth_{:0>5d}.png"
            mask_path_format_string = frame_path + os.path.sep + "mask_{:0>5d}.png"
        live_frame_index = canonical_frame_index + 1
        canonical_frame_path = frame_path_format_string.format(
            canonical_frame_index)
        canonical_mask_path = mask_path_format_string.format(
            canonical_frame_index)
        live_frame_path = frame_path_format_string.format(live_frame_index)
        live_mask_path = mask_path_format_string.format(live_frame_index)

        offset = [-64, -64, z_offset]
        # Generate SDF fields
        if use_masks:
            dataset = MaskedImageBasedFramePairDataset(
                calibration_path, canonical_frame_path, canonical_mask_path,
                live_frame_path, live_mask_path, pixel_row_index, field_size,
                offset)
        else:
            dataset = ImageBasedFramePairDataset(calibration_path,
                                                 canonical_frame_path,
                                                 live_frame_path,
                                                 pixel_row_index, field_size,
                                                 offset)

        live_field, canonical_field = dataset.generate_2d_sdf_fields(
            method=depth_interpolation_method)

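    # allocate a zero-initialized warp field (identity warp): one 2-vector per pixel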
    warp_field = np.zeros((field_size, field_size, 2), dtype=np.float32)
    view_scaling_factor = 1024 // field_size

    if visualize_and_save_initial_and_final_fields:
        visualize_and_save_initial_fields(canonical_field, live_field,
                                          out_path, view_scaling_factor)

    if draw_tsdfs_and_exit:
        return

    optimizer = SlavchevaOptimizer2d(
        out_path=out_path,
        field_size=field_size,
        default_value=default_value,
        compute_method=ComputeMethod.VECTORIZED,
        level_set_term_enabled=False,
        sobolev_smoothing_enabled=True,
        data_term_method=DataTermMethod.BASIC,
        smoothing_term_method=SmoothingTermMethod.TIKHONOV,
        adaptive_learning_rate_method=AdaptiveLearningRateMethod.NONE,
        data_term_weight=1.0,
        smoothing_term_weight=0.2,
        isomorphic_enforcement_factor=0.1,
        level_set_term_weight=0.2,
        maximum_warp_length_lower_threshold=0.05,
        max_iterations=100,
        sobolev_kernel=generate_1d_sobolev_kernel(
            size=7 if field_size > 7 else 3, strength=0.1),
        visualization_settings=SlavchevaVisualizer.Settings(
            enable_component_fields=True,
            view_scaling_factor=view_scaling_factor))

    start_time = time.time()
    optimizer.optimize(live_field, canonical_field)
    end_time = time.time()
    print("Total optimization runtime: {:f}".format(end_time - start_time))
    optimizer.plot_logged_sdf_and_warp_magnitudes()
    optimizer.plot_logged_energies_and_max_warps()

    if visualize_and_save_initial_and_final_fields:
        visualize_final_fields(canonical_field, live_field,
                               view_scaling_factor)