Example #1
import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.prep.preprocessing as prep
import discorpy.prep.linepattern as lprep
import discorpy.proc.processing as proc
import discorpy.post.postprocessing as post

# Initial parameters
file_path = "C:/data/laptop_camera/chessboard.jpg"
output_base = "./for_demo_06/"
num_coef = 5  # Number of polynomial coefficients
mat0 = io.load_image(file_path)  # Load image
(height, width) = mat0.shape

# Convert the chessboard image to a line-pattern image
mat1 = lprep.convert_chessboard_to_linepattern(mat0)
io.save_image(output_base + "/line_pattern_converted.jpg", mat1)

# Calculate slope and distance between lines
slope_hor, dist_hor = lprep.calc_slope_distance_hor_lines(mat1,
                                                          radius=15,
                                                          sensitive=0.5)
slope_ver, dist_ver = lprep.calc_slope_distance_ver_lines(mat1,
                                                          radius=15,
                                                          sensitive=0.5)
print("Horizontal slope: ", slope_hor, " Distance: ", dist_hor)
print("Vertical slope: ", slope_ver, " Distance: ", dist_ver)

# Extract reference points
list_points_hor_lines = lprep.get_cross_points_hor_lines(mat1, slope_ver,
                                                         dist_ver)
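# (The lines below are a minimal sketch of how this example typically
# continues, following discorpy's line-pattern demos; the grouping calls and
# default parameters are assumptions, not part of the original snippet.)
list_points_ver_lines = lprep.get_cross_points_ver_lines(mat1, slope_hor,
                                                         dist_hor)

# Group the points into horizontal and vertical lines
list_hor_lines = prep.group_dots_hor_lines(list_points_hor_lines, slope_hor,
                                           dist_hor)
list_ver_lines = prep.group_dots_ver_lines(list_points_ver_lines, slope_ver,
                                           dist_ver)

# Find the center of distortion and the backward-model coefficients
(xcenter, ycenter) = proc.find_cod_coarse(list_hor_lines, list_ver_lines)
list_fact = proc.calc_coef_backward(list_hor_lines, list_ver_lines,
                                    xcenter, ycenter, num_coef)

# Correct the image and save the result
mat_cor = post.unwarp_image_backward(mat0, xcenter, ycenter, list_fact)
io.save_image(output_base + "/corrected_image.jpg", mat_cor)
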
Example #2
import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.prep.preprocessing as prep
import discorpy.proc.processing as proc
import discorpy.post.postprocessing as post

# Initial parameters
file_path = "C:/data/dot_pattern_01.jpg"
output_base = "./output_demo_01/"
num_coef = 5  # Number of polynomial coefficients
mat0 = io.load_image(file_path) # Load image
(height, width) = mat0.shape
# Segment dots
mat1 = prep.binarization(mat0)
# Calculate the median dot size and distance between them.
(dot_size, dot_dist) = prep.calc_size_distance(mat1)
# Remove non-dot objects
mat1 = prep.select_dots_based_size(mat1, dot_size)
# Remove non-elliptical objects
mat1 = prep.select_dots_based_ratio(mat1)
io.save_image(output_base + "/segmented_dots.jpg", mat1)
# Calculate the slopes of horizontal lines and vertical lines.
hor_slope = prep.calc_hor_slope(mat1)
ver_slope = prep.calc_ver_slope(mat1)
print("Horizontal slope: {0}. Vertical slope: {1}".format(hor_slope, ver_slope))

# Group points to horizontal lines
list_hor_lines = prep.group_dots_hor_lines(mat1, hor_slope, dot_dist)
# Group points to vertical lines
list_ver_lines = prep.group_dots_ver_lines(mat1, ver_slope, dot_dist)
# Optional: remove outliers from horizontal lines
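# (The lines below are a minimal sketch of how this example typically
# continues, following discorpy's dot-pattern demos; the residual threshold
# and output names are assumptions, not part of the original snippet.)
list_hor_lines = prep.remove_residual_dots_hor(list_hor_lines, hor_slope, 2.5)
# Optional: remove outliers from vertical lines
list_ver_lines = prep.remove_residual_dots_ver(list_ver_lines, ver_slope, 2.5)

# Find the center of distortion and the backward-model coefficients
(xcenter, ycenter) = proc.find_cod_coarse(list_hor_lines, list_ver_lines)
list_fact = proc.calc_coef_backward(list_hor_lines, list_ver_lines,
                                    xcenter, ycenter, num_coef)
io.save_metadata_txt(output_base + "/coefficients.txt", xcenter, ycenter,
                     list_fact)

# Unwarp the original image and save it
mat_cor = post.unwarp_image_backward(mat0, xcenter, ycenter, list_fact)
io.save_image(output_base + "/corrected_image.jpg", mat_cor)
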
Example #3
    def test_load_image(self):
        file_path = "data/img.tif"
        losa.save_image(file_path, np.random.rand(64, 64))
        mat = losa.load_image(file_path)
        self.assertTrue(mat.shape == (64, 64))
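# (For reference, a self-contained way to run the test method above; the class
# name, imports, and the __main__ guard are assumptions, not the original
# test module.)
import unittest
import numpy as np
import discorpy.losa.loadersaver as losa


class LoaderSaverTest(unittest.TestCase):

    def test_load_image(self):
        file_path = "data/img.tif"
        losa.save_image(file_path, np.random.rand(64, 64))
        mat = losa.load_image(file_path)
        self.assertTrue(mat.shape == (64, 64))


if __name__ == "__main__":
    unittest.main()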
Example #4
import timeit
import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.post.postprocessing as post

time_start = timeit.default_timer()
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Initial parameters
file_path = "../data/coef_dot_05.txt"
output_base = "E:/correction/"
search_range = 100
step = 20
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Import distortion coefficients
(xcenter, ycenter, list_fact) = io.load_metadata_txt(file_path)

# Generate a 3D dataset for demonstration.
# Replace this step with real 3D data in your code.
mat0 = io.load_image("../data/dot_pattern_05.jpg")
(height, width) = mat0.shape
mat3D = np.zeros((600, height, width), dtype=np.float32)
mat3D[:] = mat0

# Generate the unwarped slice at index 14 using different
# values of xcenter and ycenter.
index = 14
for x_search in range(-search_range, search_range + step, step):
    for y_search in range(-search_range, search_range + step, step):
        corrected_slice = post.unwarp_slice_backward(mat3D, xcenter + x_search,
                                                     ycenter + y_search,
                                                     list_fact, index)
        # ----------------------------------------------------------
        # Do reconstruction here using other packages: tomopy, astra
        # ----------------------------------------------------------
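        # A minimal sketch of one way to record the search results for visual
        # checking before reconstruction (the file-naming scheme below is an
        # assumption, not part of the original snippet):
        name = "slice_{0}_x{1}_y{2}.tif".format(index, x_search, y_search)
        io.save_image(output_base + "/" + name, corrected_slice)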
Example #5
import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.proc.processing as proc
import discorpy.post.postprocessing as post
import scipy.ndimage as ndi

file_path = "C:/data/percy_cam/F_R_hazcam.png"
output_base = "./for_demo_08/"
mat0 = io.load_image(file_path, average=True)
mat0 = mat0 / np.max(mat0)
(height, width) = mat0.shape

# Create a line-pattern image
line_pattern = np.zeros((height, width), dtype=np.float32)
for i in range(50, height - 50, 40):
    line_pattern[i - 2:i + 3] = 1.0

# Estimate parameters by visual inspection:
# Coarse estimation
xcenter = width // 2
ycenter = height // 2
list_power = np.asarray([1.0, 10**(-4), 10**(-7), 10**(-10), 10**(-13)])
list_coef = np.asarray([1.0, 1.0, 1.0, 1.0, 1.0])

# Rotate the line-pattern image if needed
angle = 2.0  # Degree
pad = width // 2  # Need padding as lines are shrunk after warping.
mat_pad = np.pad(line_pattern, pad, mode='edge')
mat_pad = ndi.rotate(mat_pad, angle, reshape=False)
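
# A minimal sketch of how the estimated model can be checked visually: warp
# the padded line pattern with the trial coefficients, crop it back, and
# overlay it on the camera image (this step and the output name are
# assumptions following discorpy's demo workflow).
list_fact = list_power * list_coef
mat_warped = post.unwarp_image_backward(mat_pad, xcenter + pad,
                                        ycenter + pad, list_fact)
mat_warped = mat_warped[pad:pad + height, pad:pad + width]
io.save_image(output_base + "/overlay.jpg", 0.5 * (mat0 + mat_warped))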

Example #6
# Author: Nghia T. Vo
# E-mail: [email protected]
#============================================================================
"""
Example to show how to guess parameters of a forward model from
an unknown camera. In this case it's from  the Hazard Cameras (Hazcams) on the
underside of NASA’s Perseverance Mars rover.
https://mars.nasa.gov/system/downloadable_items/45689_PIA24430-Perseverance's_first_full-color_look_at_Mars.png
"""

import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.post.postprocessing as post

# Load image
mat0 = io.load_image("Sol0_1st_color.png")
output_base = "figs/"
(height, width) = mat0.shape
mat0 = mat0 / np.max(mat0)

# Create line pattern
line_pattern = np.zeros((height, width), dtype=np.float32)
for i in range(50, height - 50, 40):
    line_pattern[i - 1:i + 2] = 1.0

# Estimate parameters by visual inspection.
# Coarse estimation
xcenter = width / 2.0 + 110.0
ycenter = height / 2.0 - 20.0
list_pow = np.asarray([1.0, 10**(-4), 10**(-7), 10**(-10), 10**(-13)])
# Fine estimation
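# (A minimal sketch of one way to do this: scan coefficient multipliers, warp
# the ideal line pattern with each candidate model, and save the overlays for
# visual comparison against the camera image. The scan range, padding, and
# file names below are assumptions, not part of the original snippet.)
pad = width // 2
mat_pad = np.pad(line_pattern, pad, mode='edge')
for num in np.arange(0.0, 5.0, 1.0):
    list_coef = np.asarray([1.0, num, num, num, num])
    list_fact = list_pow * list_coef
    mat_warped = post.unwarp_image_backward(mat_pad, xcenter + pad,
                                            ycenter + pad, list_fact)
    mat_warped = mat_warped[pad:pad + height, pad:pad + width]
    io.save_image(output_base + "/warped_pattern_{0:3.1f}.jpg".format(num),
                  0.5 * (mat0 + mat_warped))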
Example #7
import numpy as np
import discorpy.losa.loadersaver as io
import discorpy.proc.processing as proc
import discorpy.post.postprocessing as post

# Load image
file_path = "C:/data/demo/perspective_demo.jpg"
output_base = "./for_demo_07/"
mat = io.load_image(file_path, average=False)

# Provide the coordinates of 4 points. They can be in xy-order or yx-order; this
# choice must be consistent with the other functions. In this example, xy-order is used.
list_points = [[180, 1920], [1500, 1602], [2754, 2430], [942, 3246]]

# Generate undistorted points. Note that the output coordinates are in yx-order.
s_points, t_points = proc.generate_4_source_target_perspective_points(
    list_points, input_order="xy", scale="mean", equal_dist=False)

# Calculate distortion coefficients
list_coef = proc.calc_perspective_coefficients(s_points,
                                               t_points,
                                               mapping="backward")
# Apply correction.
mat_cor = np.zeros_like(mat)
for i in range(mat_cor.shape[-1]):
    mat_cor[:, :, i] = post.correct_perspective_image(mat[:, :, i], list_coef)
io.save_image(output_base + "/corrected_image.jpg", mat_cor)

# The region of interest may be out of the field of view or rotated.
# We can rotate, offset, and scale the output image by changing the target
# points as follows:
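# (A minimal sketch of doing so; the angle, offsets, and scale are placeholder
# values, and the point manipulation below is an assumption, not the original
# snippet.)
angle = 5.0  # Degree
x_offset, y_offset = 0.0, 0.0
scale = 1.0
theta = np.deg2rad(angle)
rot_mat = np.asarray([[np.cos(theta), -np.sin(theta)],
                      [np.sin(theta), np.cos(theta)]])
(yc, xc) = np.mean(np.asarray(t_points), axis=0)
t_points2 = []
for (y, x) in t_points:
    (y1, x1) = np.dot(rot_mat, [y - yc, x - xc])
    t_points2.append([scale * y1 + yc + y_offset, scale * x1 + xc + x_offset])

# Recalculate coefficients and re-apply the correction
list_coef2 = proc.calc_perspective_coefficients(s_points,
                                                np.asarray(t_points2),
                                                mapping="backward")
for i in range(mat_cor.shape[-1]):
    mat_cor[:, :, i] = post.correct_perspective_image(mat[:, :, i], list_coef2)
io.save_image(output_base + "/corrected_image_adjusted.jpg", mat_cor)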
Example #8
flat_path = args.flat
poly_order = args.order
key_path_hdf = args.key
perspective = args.perspective

time_start = timeit.default_timer()
# Load data
print("1 ---> Load file: {}".format(file_path))
_, file_ext = os.path.splitext(file_path)
if (file_ext == ".hdf") or (file_ext == ".nxs"):
    mat0 = io.load_hdf_file(file_path, key_path=key_path_hdf, index=None,
                            axis=0)
    if len(mat0.shape) == 3:
        mat0 = np.mean(mat0, axis=0)
else:
    mat0 = io.load_image(file_path)
(height, width) = mat0.shape

# Load flat-field
if (flat_path != "none") and (flat_path != "norm"):
    _, file_ext = os.path.splitext(flat_path)
    if (file_ext == ".hdf") or (file_ext == ".nxs"):
        flat = io.load_hdf_file(flat_path, key_path=key_path_hdf, index=None,
                                axis=0)
        if len(flat.shape) == 3:
            flat = np.mean(flat, axis=0)
    else:
        flat = io.load_image(flat_path)
    (height1, width1) = flat.shape
    if (height != height1) or (width != width1):
        raise ValueError("Shapes of the image and the flat-field image are "
                         "not the same!")
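    # A minimal sketch of the flat-field normalization that typically follows
    # (guarding against zeros in the flat; this step is an assumption, not
    # part of the original snippet).
    flat[flat == 0.0] = np.mean(flat)
    mat0 = mat0 / flat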