Example #1
def resample_field_replacement(field, warp_field, replacement):
    """
    - Accepts a scalar field and a vector field (assumed, but not checked, to have the same dimensions & size).
    - Creates a new scalar field of the same dimensions.
    - For each location of the vector field, performs a bilinear lookup in the scalar field.
    - If a vector points outside the bounds of the input fields, uses the replacement value for any
    out-of-bounds samples during the interpolation.
    - Stores the result of each lookup in the corresponding location of the new scalar field.
    :param field: the scalar field containing source values
    :param warp_field: 2D vector field to use for bilinear lookups
    :param replacement: value to substitute for out-of-bounds samples
    :return: the resulting scalar field
    """
    resampled_field = np.ones_like(field)
    for y in range(field.shape[0]):
        for x in range(field.shape[1]):
            warped_location = Point2d(
                x, y) + Point2d(coordinates=warp_field[y, x])
            new_value = sampling.bilinear_sample_at_replacement(
                field, point=warped_location, replacement=replacement)
            resampled_field[y, x] = new_value
    return resampled_field
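
A minimal self-contained sketch of the same resampling logic using only NumPy, for readers without the Point2d and sampling helpers from the surrounding module (the channel order warp_field[y, x] = (dx, dy) is an assumption made for illustration):

import numpy as np

def resample_field_replacement_numpy_sketch(field, warp_field, replacement=1.0):
    """Bilinearly resample 'field' at (x + dx, y + dy); out-of-bounds corner samples use 'replacement'."""
    height, width = field.shape
    result = np.empty_like(field)

    def sample(px, py):
        # Integer-corner lookup with a constant fallback outside the field.
        if px < 0 or px >= width or py < 0 or py >= height:
            return replacement
        return field[py, px]

    for y in range(height):
        for x in range(width):
            dx, dy = warp_field[y, x]              # assumed channel order: (dx, dy)
            sx, sy = x + dx, y + dy
            x0, y0 = int(np.floor(sx)), int(np.floor(sy))
            rx, ry = sx - x0, sy - y0
            v00, v01 = sample(x0, y0), sample(x0, y0 + 1)
            v10, v11 = sample(x0 + 1, y0), sample(x0 + 1, y0 + 1)
            v0 = v00 * (1.0 - ry) + v01 * ry       # blend along y at x0
            v1 = v10 * (1.0 - ry) + v11 * ry       # blend along y at x0 + 1
            result[y, x] = v0 * (1.0 - rx) + v1 * rx
    return result
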
def get_and_print_interpolation_data(canonical_field,
                                     warped_live_field,
                                     warp_field,
                                     x,
                                     y,
                                     band_union_only=False,
                                     known_values_only=False,
                                     substitute_original=False):
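    """
    Compute and print bilinear-interpolation diagnostics for the warped location of pixel (x, y), optionally
    skipping locations outside the narrow-band union or with unknown (truncated) live values, and optionally
    substituting the original live SDF value for out-of-bounds samples.
    """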
    # TODO: use in interpolation function (don't forget the component fields and the updates) to avoid DRY violation
    original_live_sdf = warped_live_field[y, x]
    if band_union_only:
        canonical_sdf = canonical_field[y, x]
        if value_outside_narrow_band(
                original_live_sdf) and value_outside_narrow_band(
                    canonical_sdf):
            return
    if known_values_only:
        if original_live_sdf == 1.0:
            return

    warped_location = Point2d(x, y) + Point2d(coordinates=warp_field[y, x])

    if substitute_original:
        new_value, metainfo = sampling.bilinear_sample_at_replacement_metainfo(
            warped_live_field,
            point=warped_location,
            replacement=original_live_sdf)
    else:
        new_value, metainfo = sampling.bilinear_sample_at_metainfo(
            warped_live_field, point=warped_location)

    if 1.0 - abs(new_value) < 1e-6:
        new_value = np.sign(new_value)

    print_interpolation_data(metainfo, original_live_sdf, new_value)
Example #3
def interpolate_bilinearly(values, ratios):
    """
    :param values: iterable of 4 values, representing discrete points, in the order {([x=]0,[y=]0), (01), (10), (11)}
    :type ratios: Point2d
    :param ratios: distances from point (00) to the sample point along x and y axes, respectively
    :return: the bilinearly interpolated value at the sample point
    """
    inverse_ratios = Point2d(1.0, 1.0) - ratios

    value00 = values[0]
    value01 = values[1]
    value10 = values[2]
    value11 = values[3]
    interpolated_value0 = value00 * inverse_ratios.y + value01 * ratios.y
    interpolated_value1 = value10 * inverse_ratios.y + value11 * ratios.y
    interpolated_value = interpolated_value0 * inverse_ratios.x + interpolated_value1 * ratios.x
    return interpolated_value
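
The weighting above is the standard bilinear scheme; a tiny self-contained check (plain floats instead of Point2d, hypothetical helper name) confirms the corner and center behavior:

def bilinear_check(values, rx, ry):
    v00, v01, v10, v11 = values              # order: (x=0,y=0), (0,1), (1,0), (1,1)
    v0 = v00 * (1.0 - ry) + v01 * ry         # blend along y at x = 0
    v1 = v10 * (1.0 - ry) + v11 * ry         # blend along y at x = 1
    return v0 * (1.0 - rx) + v1 * rx         # blend along x

assert bilinear_check([1.0, 2.0, 3.0, 4.0], 0.0, 0.0) == 1.0   # ratios (0,0) pick out value00
assert bilinear_check([1.0, 2.0, 3.0, 4.0], 1.0, 1.0) == 4.0   # ratios (1,1) pick out value11
assert abs(bilinear_check([1.0, 2.0, 3.0, 4.0], 0.5, 0.5) - 2.5) < 1e-12  # center averages all corners
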
Example #4
def bilinear_sample_at_replacement(field, x=0, y=0, point=None, replacement=1):
    """
    Sample from a 2D scalar field at a given coordinate with bilinear interpolation.
    If the coordinate is out of bounds, uses the replacement argument for all samples that fall out of bounds
    during the interpolation.
    Works with either a named point argument or x and y coordinates positionally following the field argument.
    If point is not None, it overrides the x and y arguments.
    :param replacement: value to use as replacement when sampling out-of-bounds
    :param field: field from which to sample
    :type field: numpy.ndarray
    :param x: x coordinate for sampling location
    :type x: int
    :param y: y coordinate for sampling location
    :type y: int
    :param point: full coordinate for sampling location.
    :type point: Point2d
    :return: bilinearly interpolated scalar value at the given coordinate; corner samples that fall outside
     the bounds of the scalar field contribute the replacement value
    """
    if point is not None:
        x = point.x
        y = point.y

    # if x < 0 or x >= field.shape[1] or y < 0 or y >= field.shape[0]:
    #     return replacement

    point = Point2d(x, y)

    base_point = Point2d(math.floor(point.x), math.floor(point.y))
    ratios = point - base_point
    inverse_ratios = Point2d(1.0, 1.0) - ratios

    value00 = sample_at_replacement(field, replacement, point=base_point)
    value01 = sample_at_replacement(field, replacement, point=base_point + Point2d(0, 1))
    value10 = sample_at_replacement(field, replacement, point=base_point + Point2d(1, 0))
    value11 = sample_at_replacement(field, replacement, point=base_point + Point2d(1, 1))

    interpolated_value0 = value00 * inverse_ratios.y + value01 * ratios.y
    interpolated_value1 = value10 * inverse_ratios.y + value11 * ratios.y
    interpolated_value = interpolated_value0 * inverse_ratios.x + interpolated_value1 * ratios.x

    return interpolated_value
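
The sample_at_replacement helper is not included in these examples; a minimal sketch of its assumed behavior (integer-grid lookup with a constant fallback outside the field) could look like this:

def sample_at_replacement(field, replacement, x=0, y=0, point=None):
    # Assumed behavior, shown for illustration only; the real helper may differ.
    if point is not None:
        x, y = int(point.x), int(point.y)
    if x < 0 or x >= field.shape[1] or y < 0 or y >= field.shape[0]:
        return replacement
    return field[y, x]
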
Example #5
def bilinear_sample_at_metainfo(field, x=0, y=0, point=None):
    """
    Sample from a 2D scalar field at a given coordinate with bilinear interpolation.
    If the coordinate is out of bounds, uses "1" for all samples that fall out of bounds during the interpolation.
    Works with either a named point argument or x and y coordinates positionally following the field argument.
    If point is not None, it overrides the x and y arguments.
    :param field: field from which to sample
    :type field: numpy.ndarray
    :param x: x coordinate for sampling location
    :type x: int
    :param y: y coordinate for sampling location
    :type y: int
    :param point: full coordinate for sampling location.
    :type point: Point2d
    :return: a tuple of the bilinearly interpolated scalar value at the given coordinate and the accompanying
     BilinearSamplingMetaInfo; corner samples that fall outside the field bounds contribute the value 1
    """
    if point is not None:
        x = point.x
        y = point.y

    point = Point2d(x, y)

    base_point = Point2d(math.floor(point.x), math.floor(point.y))
    ratios = point - base_point
    inverse_ratios = Point2d(1.0, 1.0) - ratios

    value00 = sample_at(field, point=base_point)
    value01 = sample_at(field, point=base_point + Point2d(0, 1))
    value10 = sample_at(field, point=base_point + Point2d(1, 0))
    value11 = sample_at(field, point=base_point + Point2d(1, 1))

    interpolated_value0 = value00 * inverse_ratios.y + value01 * ratios.y
    interpolated_value1 = value10 * inverse_ratios.y + value11 * ratios.y
    interpolated_value = interpolated_value0 * inverse_ratios.x + interpolated_value1 * ratios.x

    metainfo = BilinearSamplingMetaInfo(value00, value01, value10, value11, ratios, inverse_ratios)

    return interpolated_value, metainfo
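
BilinearSamplingMetaInfo is referenced but not defined in these examples; judging from the constructor call above, a minimal stand-in would simply record the four corner values and the mixing ratios:

class BilinearSamplingMetaInfo:
    # Minimal stand-in matching the constructor call above; the real class may carry additional state.
    def __init__(self, value00, value01, value10, value11, ratios, inverse_ratios):
        self.value00 = value00
        self.value01 = value01
        self.value10 = value10
        self.value11 = value11
        self.ratios = ratios
        self.inverse_ratios = inverse_ratios
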
Example #6
def get_bilinear_ratios(x, y):
    point = Point2d(x, y)
    base_point = Point2d(math.floor(x), math.floor(y))
    ratios = point - base_point
    return ratios
def warp_field_with_with_flag_info(warped_live_field, warp_field, update_field,
                                   flag_field):
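    """
    Resample warped_live_field along warp_field with bilinear interpolation, substituting the original value at
    (x, y) for any corner sample whose flag in flag_field is 0. Wherever the interpolated value saturates to ±1,
    the corresponding warp and update vectors are zeroed. The result is copied back into warped_live_field in place.
    """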
    field_size = warp_field.shape[0]
    new_warped_live_field = np.ones_like(warped_live_field)
    for y in range(field_size):
        for x in range(field_size):
            warped_location = Point2d(
                x, y) + Point2d(coordinates=warp_field[y, x])
            base_point = Point2d(math.floor(warped_location.x),
                                 math.floor(warped_location.y))
            ratios = warped_location - base_point
            inverse_ratios = Point2d(1.0, 1.0) - ratios
            original_value = warped_live_field[y, x]
            value00 = sampling.sample_at(warped_live_field, point=base_point)
            flag00 = sampling.sample_flag_at(flag_field, point=base_point)
            used_replacement = False
            if flag00 == 0:
                value00 = original_value
                used_replacement = True
            value01 = sampling.sample_at(warped_live_field,
                                         point=base_point + Point2d(0, 1))
            flag01 = sampling.sample_flag_at(flag_field,
                                             point=base_point + Point2d(0, 1))
            if flag01 == 0:
                value01 = original_value
                used_replacement = True
            value10 = sampling.sample_at(warped_live_field,
                                         point=base_point + Point2d(1, 0))
            flag10 = sampling.sample_flag_at(flag_field,
                                             point=base_point + Point2d(1, 0))
            if flag10 == 0:
                value10 = original_value
                used_replacement = True
            value11 = sampling.sample_at(warped_live_field,
                                         point=base_point + Point2d(1, 1))
            flag11 = sampling.sample_flag_at(flag_field,
                                             point=base_point + Point2d(1, 1))
            if flag11 == 0:
                value11 = original_value
                used_replacement = True

            interpolated_value0 = value00 * inverse_ratios.y + value01 * ratios.y
            interpolated_value1 = value10 * inverse_ratios.y + value11 * ratios.y
            interpolated_value = interpolated_value0 * inverse_ratios.x + interpolated_value1 * ratios.x
            if 1.0 - abs(interpolated_value) < 1e-3:
                # if 1.0 - abs(interpolated_value) < 0.05:
                interpolated_value = np.sign(interpolated_value)
                warp_field[y, x] = 0.0
                update_field[y, x] = 0.0

            if sampling.focus_coordinates_match(x, y):
                print("[Interpolation data] ",
                      BOLD_YELLOW,
                      "{:+03.3f}*{:03.3f}, {:+03.3f}*{:03.3f}".format(
                          value00,
                          inverse_ratios.y * inverse_ratios.x,
                          value10,
                          inverse_ratios.y * ratios.x,
                      ),
                      RESET,
                      sep='')
                print("                     ",
                      BOLD_YELLOW,
                      "{:+03.3f}*{:03.3f}, {:+03.3f}*{:03.3f}".format(
                          value01, ratios.y * inverse_ratios.x, value11,
                          ratios.y * ratios.x),
                      RESET,
                      " used replacement:",
                      BOLD_GREEN,
                      used_replacement,
                      RESET,
                      " final value: ",
                      BOLD_GREEN,
                      interpolated_value,
                      RESET,
                      sep='')
            new_warped_live_field[y, x] = interpolated_value
    np.copyto(warped_live_field, new_warped_live_field)
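
The four corner blocks above repeat the same sample-and-check pattern; the rule they implement can be expressed with one hypothetical helper (not part of the original module, and relying on the same sampling module used above):

def sample_corner_or_original(warped_live_field, flag_field, corner, original_value):
    # Return (value, used_replacement): the field value at the corner, unless its flag is 0,
    # in which case fall back to the original value of the voxel being warped.
    value = sampling.sample_at(warped_live_field, point=corner)
    flag = sampling.sample_flag_at(flag_field, point=corner)
    if flag == 0:
        return original_value, True
    return value, False
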
Example #8
    def __optimization_iteration_direct(self,
                                        warped_live_field,
                                        canonical_field,
                                        warp_field,
                                        data_component_field=None,
                                        smoothing_component_field=None,
                                        level_set_component_field=None,
                                        band_union_only=True):
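        """
        Perform a single optimization iteration by accumulating the data, (optional) level set, and smoothing
        term gradients voxel by voxel, applying a gradient-descent step to the warp field, and resampling the
        warped live field along the updated warps.
        :return: the maximum warp length and its location
        """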

        self.total_data_energy = 0.
        self.total_smoothing_energy = 0.
        self.total_level_set_energy = 0.

        field_size = warp_field.shape[0]

        live_gradient_y, live_gradient_x = np.gradient(warped_live_field)

        for y in range(0, field_size):
            for x in range(0, field_size):
                if focus_coordinates_match(x, y):
                    print("Point: ", x, ",", y, sep='', end='')

                gradient = 0.0

                live_sdf = warped_live_field[y, x]

                live_is_truncated = value_outside_narrow_band(live_sdf)

                if band_union_only and voxel_is_outside_narrow_band_union(
                        warped_live_field, canonical_field, x, y):
                    continue

                data_gradient, local_data_energy = \
                    dt.compute_local_data_term(warped_live_field, canonical_field, x, y, live_gradient_x,
                                               live_gradient_y, method=self.data_term_method)
                scaled_data_gradient = self.data_term_weight * data_gradient
                self.total_data_energy += self.data_term_weight * local_data_energy
                gradient += scaled_data_gradient
                if focus_coordinates_match(x, y):
                    print(" Data grad: ",
                          BOLD_GREEN,
                          -data_gradient,
                          RESET,
                          sep='',
                          end='')
                if data_component_field is not None:
                    data_component_field[y, x] = data_gradient
                if self.level_set_term_enabled and not live_is_truncated:
                    level_set_gradient, local_level_set_energy = \
                        level_set_term_at_location(warped_live_field, x, y)
                    scaled_level_set_gradient = self.level_set_term_weight * level_set_gradient
                    self.total_level_set_energy += self.level_set_term_weight * local_level_set_energy
                    gradient += scaled_level_set_gradient
                    if level_set_component_field is not None:
                        level_set_component_field[y, x] = level_set_gradient
                    if focus_coordinates_match(x, y):
                        print(" Level-set grad (scaled): ",
                              BOLD_GREEN,
                              -scaled_level_set_gradient,
                              RESET,
                              sep='',
                              end='')

                smoothing_gradient, local_smoothing_energy = \
                    st.compute_local_smoothing_term_gradient(warp_field, x, y, method=self.smoothing_term_method,
                                                             copy_if_zero=False,
                                                             isomorphic_enforcement_factor=
                                                             self.isomorphic_enforcement_factor)
                scaled_smoothing_gradient = self.smoothing_term_weight * smoothing_gradient
                self.total_smoothing_energy += self.smoothing_term_weight * local_smoothing_energy
                gradient += scaled_smoothing_gradient
                if smoothing_component_field is not None:
                    smoothing_component_field[y, x] = smoothing_gradient
                if focus_coordinates_match(x, y):
                    print(" Smoothing grad (scaled): ",
                          BOLD_GREEN,
                          -scaled_smoothing_gradient,
                          RESET,
                          sep='',
                          end='')

                self.gradient_field[y, x] = gradient

        if self.sobolev_smoothing_enabled:
            convolve_with_kernel_preserve_zeros(self.gradient_field,
                                                self.sobolev_kernel, True)

        max_warp = 0.0
        max_warp_location = -1

        # update the warp field based on the gradient
        for y in range(0, field_size):
            for x in range(0, field_size):
                warp_field[y, x] = -self.gradient_field[
                    y, x] * self.gradient_descent_rate
                if focus_coordinates_match(x, y):
                    print(" Warp: ",
                          BOLD_GREEN,
                          warp_field[y, x],
                          RESET,
                          " Warp length: ",
                          BOLD_GREEN,
                          np.linalg.norm(warp_field[y, x]),
                          RESET,
                          sep='')
                warp_length = np.linalg.norm(warp_field[y, x])
                if warp_length > max_warp:
                    max_warp = warp_length
                    max_warp_location = Point2d(x, y)
                if (x, y) in self.focus_neighborhood_log:
                    log = self.focus_neighborhood_log[(x, y)]
                    log.warp_magnitudes.append(warp_length)
                    log.sdf_values.append(warped_live_field[y, x])

        new_warped_live_field = resample_warped_live(canonical_field,
                                                     warped_live_field,
                                                     warp_field,
                                                     self.gradient_field,
                                                     band_union_only=False,
                                                     known_values_only=False,
                                                     substitute_original=False)
        np.copyto(warped_live_field, new_warped_live_field)

        return max_warp, max_warp_location
Example #9
    def __optimization_iteration_vectorized(self,
                                            warped_live_field,
                                            canonical_field,
                                            warp_field,
                                            band_union_only=True):
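        """
        Perform a single optimization iteration using vectorized data- and smoothing-term computations (the
        level set term is not implemented in this version), apply a gradient-descent step to the warp field,
        and resample the warped live field via the C++ extension.
        :return: the maximum warp length and its location as a Point2d
        """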

        live_gradient_y, live_gradient_x = np.gradient(warped_live_field)
        data_gradient_field = dt.compute_data_term_gradient_vectorized(
            warped_live_field, canonical_field, live_gradient_x,
            live_gradient_y)
        set_zeros_for_values_outside_narrow_band_union(warped_live_field,
                                                       canonical_field,
                                                       data_gradient_field)
        self.total_data_energy = \
            dt.compute_data_term_energy_contribution(warped_live_field, canonical_field) * self.data_term_weight
        smoothing_gradient_field = st.compute_smoothing_term_gradient_vectorized(
            warp_field)
        self.total_smoothing_energy = \
            st.compute_smoothing_term_energy(warp_field, warped_live_field,
                                             canonical_field) * self.smoothing_term_weight

        if self.visualizer.data_component_field is not None:
            np.copyto(self.visualizer.data_component_field,
                      data_gradient_field)
        if self.visualizer.smoothing_component_field is not None:
            np.copyto(self.visualizer.smoothing_component_field,
                      smoothing_gradient_field)
        if self.visualizer.level_set_component_field is not None:
            frame_info = getframeinfo(currentframe())
            print(
                "Warning: level set term not implemented in vectorized version, "
                "passed level_set_component_field is not None, {:s} : {:d}".
                format(frame_info.filename, frame_info.lineno))

        self.gradient_field = self.data_term_weight * data_gradient_field + \
                              self.smoothing_term_weight * smoothing_gradient_field

        if band_union_only:
            set_zeros_for_values_outside_narrow_band_union(
                warped_live_field, canonical_field, self.gradient_field)

        # *** Print information at focus voxel
        focus_x, focus_y = get_focus_coordinates()
        focus = (focus_y, focus_x)
        print("Point: ", focus_x, ",", focus_y, sep='', end='')
        dt.compute_local_data_term(warped_live_field,
                                   canonical_field,
                                   focus_x,
                                   focus_y,
                                   live_gradient_x,
                                   live_gradient_y,
                                   method=dt.DataTermMethod.BASIC)
        focus_data_gradient = data_gradient_field[focus]
        print(" Data grad: ",
              BOLD_GREEN,
              -focus_data_gradient,
              RESET,
              sep='',
              end='')

        st.compute_local_smoothing_term_gradient(
            warp_field,
            focus_x,
            focus_y,
            method=self.smoothing_term_method,
            copy_if_zero=False,
            isomorphic_enforcement_factor=self.isomorphic_enforcement_factor)
        focus_smoothing_gradient = smoothing_gradient_field[
            focus] * self.smoothing_term_weight
        print(" Smoothing grad (scaled): ",
              BOLD_GREEN,
              -focus_smoothing_gradient,
              RESET,
              sep='',
              end='')

        # ***
        if self.sobolev_smoothing_enabled:
            convolve_with_kernel_preserve_zeros(self.gradient_field,
                                                self.sobolev_kernel, True)

        np.copyto(warp_field,
                  -self.gradient_field * self.gradient_descent_rate)
        warp_lengths = np.linalg.norm(warp_field, axis=2)
        maximum_warp_length_at = np.unravel_index(np.argmax(warp_lengths),
                                                  warp_lengths.shape)
        maximum_warp_length = warp_lengths[maximum_warp_length_at]

        # ***
        print(" Warp: ",
              BOLD_GREEN,
              warp_field[focus],
              RESET,
              " Warp length: ",
              BOLD_GREEN,
              np.linalg.norm(warp_field[focus]),
              RESET,
              sep='')
        # ***

        get_and_print_interpolation_data(canonical_field, warped_live_field,
                                         warp_field, focus_x, focus_y)

        u_vectors = warp_field[:, :, 0].copy()
        v_vectors = warp_field[:, :, 1].copy()

        out_warped_live_field, (out_u_vectors, out_v_vectors) = \
            cpp_extension.resample(warped_live_field, canonical_field, u_vectors, v_vectors)

        np.copyto(warped_live_field, out_warped_live_field)

        # some entries might have been erased due to things in the live sdf becoming truncated
        warp_field[:, :, 0] = out_u_vectors
        warp_field[:, :, 1] = out_v_vectors

        return maximum_warp_length, Point2d(maximum_warp_length_at[1],
                                            maximum_warp_length_at[0])
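
The max-warp bookkeeping at the end of the vectorized iteration can be exercised on its own; a small self-contained sketch with a random warp field:

import numpy as np

# Locate the longest warp vector in an (H, W, 2) field, mirroring the
# np.linalg.norm / np.unravel_index bookkeeping used above.
warp_field = np.random.uniform(-1.0, 1.0, size=(8, 8, 2)).astype(np.float32)
warp_lengths = np.linalg.norm(warp_field, axis=2)
maximum_warp_length_at = np.unravel_index(np.argmax(warp_lengths), warp_lengths.shape)
maximum_warp_length = warp_lengths[maximum_warp_length_at]
print(maximum_warp_length, (maximum_warp_length_at[1], maximum_warp_length_at[0]))  # length, (x, y)
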
def perform_multiple_tests(
        start_from_sample=0,
        data_term_method=DataTermMethod.BASIC,
        optimizer_choice=OptimizerChoice.CPP,
        depth_interpolation_method=GenerationMethod.BASIC,
        out_path="out2D/Snoopy MultiTest",
        input_case_file=None,
        calibration_path="/media/algomorph/Data/Reconstruction/real_data/snoopy/snoopy_calib.txt",
        frame_path="/media/algomorph/Data/Reconstruction/real_data/snoopy/frames/",
        z_offset=128):
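    """
    Run the chosen optimizer over a series of frame pairs and pixel rows (either sampled from the dataset or
    read from input_case_file), log convergence statistics, optionally save per-case visualizations, and
    optionally assemble a tiled comparison image of good vs. bad cases.
    """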
    # CANDIDATES FOR ARGS
    save_initial_and_final_fields = input_case_file is not None
    enable_warp_statistics_logging = input_case_file is not None
    save_frame_images = input_case_file is not None
    use_masks = True

    # TODO a tiled image with 6x6 bad cases and 6x6 good cases (SDF fields)
    save_tiled_good_vs_bad_case_comparison_image = True
    save_per_case_results_in_root_output_folder = False

    rebuild_optimizer = optimizer_choice != OptimizerChoice.CPP
    max_iterations = 400 if optimizer_choice == OptimizerChoice.CPP else 100

    # dataset location
    frame_count, frame_filename_format, use_masks = shared.check_frame_count_and_format(
        frame_path, not use_masks)
    if frame_filename_format == shared.FrameFilenameFormat.SIX_DIGIT:
        frame_path_format_string = frame_path + os.path.sep + "depth_{:0>6d}.png"
        mask_path_format_string = frame_path + os.path.sep + "mask_{:0>6d}.png"
    else:  # has to be FIVE_DIGIT
        frame_path_format_string = frame_path + os.path.sep + "depth_{:0>5d}.png"
        mask_path_format_string = frame_path + os.path.sep + "mask_{:0>5d}.png"

    # CANDIDATES FOR ARGS
    field_size = 128
    offset = [-64, -64, z_offset]
    line_range = (214, 400)
    view_scaling_factor = 1024 // field_size

    # region ================ Generation of lists of frames & pixel rows to work with ==================================
    check_empty_row = True

    if input_case_file:
        frame_row_and_focus_set = np.genfromtxt(input_case_file,
                                                delimiter=",",
                                                dtype=np.int32)
        # drop column headers
        frame_row_and_focus_set = frame_row_and_focus_set[1:]
        # drop live frame indexes
        frame_row_and_focus_set = np.concatenate(
            (frame_row_and_focus_set[:, 0].reshape(
                -1, 1), frame_row_and_focus_set[:, 2].reshape(
                    -1, 1), frame_row_and_focus_set[:, 3:5]),
            axis=1)
    else:
        frame_set = list(range(0, frame_count - 1, 5))
        pixel_row_set = line_range[0] + (
            (line_range[1] - line_range[0]) *
            np.random.rand(len(frame_set))).astype(np.int32)
        focus_x = np.zeros((
            len(frame_set),
            1,
        ))
        focus_y = np.zeros((
            len(frame_set),
            1,
        ))
        frame_row_and_focus_set = zip(frame_set, pixel_row_set, focus_x,
                                      focus_y)
        if check_empty_row:
            # replace empty rows
            new_pixel_row_set = []
            for canonical_frame_index, pixel_row_index, _, _ in frame_row_and_focus_set:
                live_frame_index = canonical_frame_index + 1
                canonical_frame_path = frame_path_format_string.format(
                    canonical_frame_index)
                canonical_mask_path = mask_path_format_string.format(
                    canonical_frame_index)
                live_frame_path = frame_path_format_string.format(
                    live_frame_index)
                live_mask_path = mask_path_format_string.format(
                    live_frame_index)
                while shared.is_image_row_empty(canonical_frame_path, canonical_mask_path, pixel_row_index, use_masks) \
                        or shared.is_image_row_empty(live_frame_path, live_mask_path, pixel_row_index, use_masks):
                    pixel_row_index = int(line_range[0] + (
                        line_range[1] - line_range[0]) * np.random.rand())
                new_pixel_row_set.append(pixel_row_index)
            frame_row_and_focus_set = zip(frame_set, new_pixel_row_set,
                                          focus_x, focus_y)

    # endregion ========================================================================================================

    # logging
    convergence_status_log = []
    convergence_status_log_file_path = os.path.join(
        out_path, "convergence_status_log.csv")

    max_case_count = 36
    good_case_sdfs = []
    bad_case_sdfs = []

    save_log_every_n_runs = 5

    if start_from_sample == 0 and os.path.exists(
            os.path.join(out_path, "output_log.txt")):
        os.unlink(os.path.join(out_path, "output_log.txt"))

    i_sample = 0

    optimizer = None if rebuild_optimizer else \
        build_optimizer(optimizer_choice, out_path, field_size, view_scaling_factor=8, max_iterations=max_iterations,
                        enable_warp_statistics_logging=enable_warp_statistics_logging,
                        data_term_method=data_term_method)

    # run the optimizers
    for canonical_frame_index, pixel_row_index, focus_x, focus_y in frame_row_and_focus_set:
        if i_sample < start_from_sample:
            i_sample += 1
            continue

        sampling.set_focus_coordinates(focus_x, focus_y)

        live_frame_index = canonical_frame_index + 1
        out_subpath = os.path.join(
            out_path, "frames {:0>6d}-{:0>6d} line {:0>3d}".format(
                canonical_frame_index, live_frame_index, pixel_row_index))

        canonical_frame_path = frame_path_format_string.format(
            canonical_frame_index)
        canonical_mask_path = mask_path_format_string.format(
            canonical_frame_index)
        live_frame_path = frame_path_format_string.format(live_frame_index)
        live_mask_path = mask_path_format_string.format(live_frame_index)

        if save_frame_images:

            def highlight_row_and_save_image(path_to_original, output_name,
                                             ix_row):
                output_image = highlight_row_on_gray(
                    rescale_depth_to_8bit(
                        cv2.imread(path_to_original, cv2.IMREAD_UNCHANGED)),
                    ix_row)
                cv2.imwrite(os.path.join(out_subpath, output_name),
                            output_image)

            highlight_row_and_save_image(canonical_frame_path,
                                         "canonical_frame_rh.png",
                                         pixel_row_index)
            highlight_row_and_save_image(live_frame_path, "live_frame_rh.png",
                                         pixel_row_index)

        # Generate SDF fields
        if use_masks:
            dataset = MaskedImageBasedFramePairDataset(
                calibration_path, canonical_frame_path, canonical_mask_path,
                live_frame_path, live_mask_path, pixel_row_index, field_size,
                offset)
        else:
            dataset = ImageBasedFramePairDataset(calibration_path,
                                                 canonical_frame_path,
                                                 live_frame_path,
                                                 pixel_row_index, field_size,
                                                 offset)

        live_field, canonical_field = dataset.generate_2d_sdf_fields(
            method=depth_interpolation_method)

        if save_initial_and_final_fields:
            save_initial_fields(canonical_field, live_field, out_subpath,
                                view_scaling_factor)

        print(
            "{:s} OPTIMIZATION BETWEEN FRAMES {:0>6d} AND {:0>6d} ON LINE {:0>3d}{:s}"
            .format(BOLD_LIGHT_CYAN, canonical_frame_index, live_frame_index,
                    pixel_row_index, RESET),
            end="")

        if rebuild_optimizer:
            optimizer = build_optimizer(
                optimizer_choice,
                out_subpath,
                field_size,
                view_scaling_factor=8,
                max_iterations=max_iterations,
                enable_warp_statistics_logging=enable_warp_statistics_logging,
                data_term_method=data_term_method)
        original_live_field = live_field.copy()
        live_field = optimizer.optimize(live_field, canonical_field)

        # ===================== LOG AFTER-RUN RESULTS ==================================================================

        if save_initial_and_final_fields:
            save_final_fields(canonical_field, live_field, out_subpath,
                              view_scaling_factor)

        if optimizer_choice != OptimizerChoice.CPP:
            # call python-specific logging routines
            optimizer.plot_logged_sdf_and_warp_magnitudes()
            optimizer.plot_logged_energies_and_max_warps()
        else:
            # call C++-specific logging routines
            if enable_warp_statistics_logging:
                warp_statistics = optimizer.get_warp_statistics_as_matrix()
                root_subpath = os.path.join(
                    out_path,
                    "warp_statistics_frames_{:0>6d}-{:0>6d}_row_{:0>3d}.png".
                    format(canonical_frame_index, live_frame_index,
                           pixel_row_index))
                if save_per_case_results_in_root_output_folder:
                    plot_warp_statistics(out_subpath,
                                         warp_statistics,
                                         extra_path=root_subpath)
                else:
                    plot_warp_statistics(out_subpath,
                                         warp_statistics,
                                         extra_path=None)

        convergence_report = optimizer.get_convergence_report()
        max_warp_at_cpp = convergence_report.warp_delta_statistics.longest_warp_location
        max_warp_at = Point2d(max_warp_at_cpp.x, max_warp_at_cpp.y)
        if not convergence_report.iteration_limit_reached:
            if convergence_report.warp_delta_statistics.is_largest_above_max_threshold:
                print(": DIVERGED", end="")
            else:
                print(": CONVERGED", end="")

            if (save_tiled_good_vs_bad_case_comparison_image
                    and not convergence_report.warp_delta_statistics.
                    is_largest_above_max_threshold
                    and len(good_case_sdfs) < max_case_count):
                good_case_sdfs.append(
                    (canonical_field, original_live_field, max_warp_at))

        else:
            print(": NOT CONVERGED", end="")
            if save_tiled_good_vs_bad_case_comparison_image and len(
                    bad_case_sdfs) < max_case_count:
                bad_case_sdfs.append(
                    (canonical_field, original_live_field, max_warp_at))
        print(" IN", convergence_report.iteration_count, "ITERATIONS")

        log_convergence_status(convergence_status_log, convergence_report,
                               canonical_frame_index, live_frame_index,
                               pixel_row_index)

        if rebuild_optimizer:
            del optimizer
            plt.close('all')
            gc.collect()

        i_sample += 1

        if i_sample % save_log_every_n_runs == 0:
            record_convergence_status_log(convergence_status_log,
                                          convergence_status_log_file_path)

    record_convergence_status_log(convergence_status_log,
                                  convergence_status_log_file_path)
    record_cases_files(convergence_status_log, out_path)
    if save_tiled_good_vs_bad_case_comparison_image:
        if len(good_case_sdfs) > 0 and len(bad_case_sdfs) > 0:
            save_tiled_tsdf_comparison_image(
                os.path.join(out_path, "good_vs_bad.png"), good_case_sdfs,
                bad_case_sdfs)
        else:
            if len(good_case_sdfs) == 0:
                print(
                    "Warning: no 'good' cases; skipping saving comparison image"
                )
            elif len(bad_case_sdfs) == 0:
                print(
                    "Warning: no 'bad' cases; skipping saving comparison image"
                )
Example #11
def mark_focus_coordinate_on_sdf_image(image, scale=8):
    focus_coordinates = get_focus_coordinates()
    return mark_point_on_sdf_image(
        image, Point2d(focus_coordinates[0], focus_coordinates[1]), scale)
Example #12
def generate_initial_orthographic_2d_tsdf_fields(
        field_size=128,
        narrow_band_width_voxels=20,
        mimic_eta=False,
        live_smoothing_kernel_size=0,
        canonical_smoothing_kernel_size=0,
        default_value=1):
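    """
    Generate a pair of sample orthographic 2D TSDF fields (live and canonical) from a hand-specified polygonal
    surface, with the canonical surface offset from the live one by a constant shift, an optional back cutoff
    behind the surface when mimic_eta is set, and optional Gaussian smoothing of either field.
    :return: (live_field, canonical_field)
    """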
    # for simplicity, the coordinates for polygonal surface boundary are specified as integers
    # it is unrealistic to expect true boundary to hit voxels dead-on like this,
    # so I add a small vertical offset to each point
    offset = -0.23
    surface_point_coordinates = np.array(
        [[9, 56], [14, 66], [23, 72], [35, 72], [44, 65], [54, 60], [63, 60],
         [69, 64], [76, 71], [84, 73], [91, 72], [106, 63], [109, 57]],
        dtype=np.float32)

    surface_points_extra = np.array([[32, 65], [36, 65], [41, 61]],
                                    dtype=np.float32)

    live_surface_points = []

    for i_point in range(surface_point_coordinates.shape[0]):
        live_surface_points.append(
            Point2d(surface_point_coordinates[i_point, 0],
                    surface_point_coordinates[i_point, 1]))

    live_extra_points = [
        Point2d(surf_point_coordinates[0], surf_point_coordinates[1])
        for surf_point_coordinates in surface_points_extra
    ]

    for live_point in live_surface_points:
        live_point.y += offset  # unrealistic to expect even values
    for live_point in live_extra_points:
        live_point.y += offset

    live_field = generate_sample_orthographic_2d_tsdf_field(
        live_surface_points,
        field_size,
        narrow_band_width_voxels=narrow_band_width_voxels,
        default_value=default_value)
    live_field = add_surface_to_2d_tsdf_field_sample(
        live_field,
        live_extra_points,
        narrow_band_width_voxels=narrow_band_width_voxels)

    # generate canonical field as live field shifted with a constant offset
    canonical_surface_points = [
        Point2d(live_point.x, live_point.y + 5.0)
        for live_point in live_surface_points
    ]
    canonical_extra_points = [
        Point2d(live_point.x, live_point.y + 5.0)
        for live_point in live_extra_points
    ]

    if mimic_eta:
        back_cutoff_voxels = 3
    else:
        back_cutoff_voxels = np.inf

    canonical_field = generate_sample_orthographic_2d_tsdf_field(
        canonical_surface_points,
        field_size,
        narrow_band_width_voxels=narrow_band_width_voxels,
        back_cutoff_voxels=back_cutoff_voxels,
        default_value=default_value)
    canonical_field = add_surface_to_2d_tsdf_field_sample(
        canonical_field,
        canonical_extra_points,
        narrow_band_width_voxels=narrow_band_width_voxels,
        back_cutoff_voxels=back_cutoff_voxels)

    # smooth live & canonical as necessary
    if live_smoothing_kernel_size > 0 and not IGNORE_OPENCV:
        live_field = cv2.GaussianBlur(
            live_field,
            (live_smoothing_kernel_size, live_smoothing_kernel_size),
            0,
            borderType=cv2.BORDER_REPLICATE)
    if canonical_smoothing_kernel_size > 0 and not IGNORE_OPENCV:
        canonical_field = cv2.GaussianBlur(
            canonical_field,
            (canonical_smoothing_kernel_size, canonical_smoothing_kernel_size),
            0,
            borderType=cv2.BORDER_REPLICATE)

    return live_field, canonical_field