Example #1
    def extract(self, img_path):
        if not os.path.isfile(img_path):
            print('File not found:', img_path)
            return None
        image = pb.load_single_band(img_path, np.uint8)
        self.detector.detect(image)
        qr_codes = []
        for qr in self.detector.detections:
            qr_codes.append({
                'text': qr.message,
                'points': qr.bounds.convert_tuple()
            })
        return qr_codes
    def scan(self, img_path):
        detector = pb.FactoryFiducial(np.uint8).qrcode()

        #Load image in grayscale
        bcv_img = pb.load_single_band(img_path, np.uint8)

        detector.detect(bcv_img)

        if (len(detector.detections) > 0):
            print(detector.detections[0].message)

        return len(detector.detections) + len(detector.failures), len(
            detector.detections)
    def extract(self, img_path):
        image = pb.load_single_band(img_path, np.uint8)
        self.detector.detect(image)
        qr_codes = []
        for qr in self.detector.detections:
            qr_codes.append({
                'type': 'qr',
                'text': qr.message,
                'points': [list(vertex) for vertex in qr.bounds.convert_tuple()]
            })
        return qr_codes
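
# The three method snippets above all assume a surrounding class that owns a QR code
# detector. A minimal, hedged sketch of such a wrapper (the class name QrReader and the
# path 'qrcode.png' are made up for illustration; the detector construction mirrors scan()):
import numpy as np
import pyboof as pb


class QrReader:
    def __init__(self):
        # Build the detector once instead of on every call
        self.detector = pb.FactoryFiducial(np.uint8).qrcode()

    def extract(self, img_path):
        image = pb.load_single_band(img_path, np.uint8)
        self.detector.detect(image)
        return [{'text': qr.message, 'points': qr.bounds.convert_tuple()}
                for qr in self.detector.detections]


print(QrReader().extract('qrcode.png'))  # 'qrcode.png' is a placeholder path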
Example #4
import pyboof as pb

import numpy as np

data_path = "../../../data/applet/fiducial/image/examples/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path+"intrinsic.xml")

config = pb.ConfigFiducialImage()

print "Configuring detector"
detector = pb.FactoryFiducial( np.uint8 ).squareRobust(config,6)
detector.setIntrinsic(intrinsic)
detector.addPattern(pb.load_single_band(data_path+"../patterns/chicken.png",np.uint8),4.0)
detector.addPattern(pb.load_single_band(data_path+"../patterns/yu.png",np.uint8),4.0)

print "Detecting image"
detector.detect(pb.load_single_band(data_path+"image01.jpg",np.uint8))

print "Number Found = "+str(detector.totalFound())

for i in range(detector.totalFound()):
    print "=========== Found "+str(i)
    fid_to_cam = detector.getFiducialToCamera(i)
    # print fid_to_cam
    print "Rotation"
    print "  "+str(fid_to_cam.get_rotation())
    print "Translation"
    print "  "+str(fid_to_cam.get_translation())

import time
from datetime import datetime

import numpy as np
import pyboof as pb
from picamera import PiCamera

start = time.time()

#camera taking the picture
camera = PiCamera()
print(datetime.now())  #this output is for the logfile
#camera.resolution = (2592, 1944)
camera.resolution = (3280, 2464)
#camera.shutter_speed=4000
camera.capture('/home/pi/Desktop/capture.jpg')  # saving picture
camera.close()

#pyboof

pb.init_memmap()

gray = pb.load_single_band('/home/pi/Desktop/capture.jpg',
                           np.uint8)  # loading the picture
detector = pb.FactoryFiducial(np.uint8).qrcode()  #qrcode detecting
detector.detect(gray)

values = ''  #create empty string

response = detector.detections
for qr in response:
    values += '{} '.format(qr.message)

print(values)  #this output is for the logfile
print(len(response))  #this output is for the logfile

#temperature
# the sensor address needs to match the address of the sensor that is connected
sensor = '/sys/bus/w1/devices/28-01144058eeaa/w1_slave'
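
# The script stops right after naming the 1-wire device file. A hedged sketch of how such a
# DS18B20 reading is usually parsed (the w1_slave file ends with a line containing "t=" followed
# by the temperature in millidegrees; the device id above must match the connected sensor):
with open(sensor) as f:
    lines = f.readlines()
if lines[0].strip().endswith('YES'):  # CRC check passed
    temp_c = int(lines[1].split('t=')[1]) / 1000.0
    print(temp_c)  # this output is for the logfile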
Example #6
data_path = "../data/example/fiducial/image/examples/"

# Load the camera parameters
intrinsic = pb.CameraPinhole()
intrinsic.load(os.path.join(data_path, "intrinsic.yaml"))

configFiducial = pb.ConfigFiducialImage()
configThreshold = pb.ConfigThreshold.create_local(pb.ThresholdType.LOCAL_MEAN,
                                                  10)

print("Configuring detector")
detector = pb.FactoryFiducial(np.uint8).square_image(configFiducial,
                                                     configThreshold)
detector.set_intrinsic(intrinsic)
detector.add_pattern(pb.load_single_band(
    data_path + "../patterns/pentarose.png", np.uint8),
                     side_length=4.0)
detector.add_pattern(pb.load_single_band(data_path + "../patterns/yu.png",
                                         np.uint8),
                     side_length=4.0)

print("Detecting image")
detector.detect(
    pb.load_single_band(os.path.join(data_path, "image00.jpg"), np.uint8))

print("Number Found = " + str(detector.get_total()))

for i in range(detector.get_total()):
    print("=========== Found #" + str(i))
    fid_to_cam = detector.get_fiducial_to_camera(i)
    print("Pattern ID = " + str(detector.get_id(i)))
#!/usr/bin/env python3

import numpy as np
import pyboof as pb
import time

# This example shows how you can adjust the number of threads that BoofCV will use in the JVM
original = pb.load_single_band('../data/example/outdoors01.jpg', np.uint8)

gaussian = original.createSameShape()

# Let's warm up the JVM.
for i in range(5):
    time0 = time.time()
    pb.blur_gaussian(original, gaussian, radius=12)
    time1 = time.time()
    print("Warm up iteration {:.1f} ms".format(1000 * (time1 - time0)))

print()
print()

N = 30

time0 = time.time()
for i in range(N):
    pb.blur_gaussian(original, gaussian, radius=12)
time1 = time.time()

print("Time with default threads {:.1f} ms".format(1000 * (time1 - time0)))

pb.set_max_threads(1)
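
# The listing ends right after limiting BoofCV to a single thread. A natural follow-up,
# sketched here under that assumption, is to repeat the same timing loop so the two
# configurations can be compared:
time0 = time.time()
for i in range(N):
    pb.blur_gaussian(original, gaussian, radius=12)
time1 = time.time()

print("Time with a single thread {:.1f} ms".format(1000 * (time1 - time0)))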
Example #8
recognizer = pb.FactorySceneRecognition(np.uint8).scene_recognition()

# First we need to learn a model so that it knows how to describe the images. BoofCV does provide a
# pre-built model generated from vacation photos, but learning is fast enough that it's often easier to train it
# on the images you plan to search.
print("Learning the model. This can take a moment or two.")
recognizer.learn_model(list_images)

# Alternatively you can comment out the learning code above and load
# a pre-built model by uncommenting the line below
# recognizer = pb.download_default_scene_recognition(np.uint8, "saved_models")

# Now add all the images that we wish to look up
print("Adding images to the database")
for image_file in list_images:
    boof_gray = pb.load_single_band(image_file, np.uint8)
    recognizer.add_image(image_file, boof_gray)

# Let's look one up and see which images are related
print("Making a query: ", list_images[6])
query_image = pb.load_single_band(list_images[6], np.uint8)
found_matches = recognizer.query(query_image, 5)

# We are expecting the 3 correct matches to come first; the other two will be incorrect/noise
print("len={}".format(len(found_matches)))
print("\nResults:")
for m in found_matches:
    print("{:s} error={:f}".format(m["id"], m["error"]))

# Display the results
image_list = [(query_image, "Query")]
import numpy as np

import pyboof as pb

data_path = "../data/example/fiducial/image/examples/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path+"intrinsic.xml")

configFiducial = pb.ConfigFiducialImage()
configThreshold = pb.ConfigThreshold.create_local(pb.ThresholdType.LOCAL_SQUARE,10)

print "Configuring detector"
detector = pb.FactoryFiducial( np.uint8 ).squareImage(configFiducial,configThreshold)
detector.setIntrinsic(intrinsic)
detector.addPattern(pb.load_single_band(data_path+"../patterns/pentarose.png",np.uint8),4.0)
detector.addPattern(pb.load_single_band(data_path+"../patterns/yu.png",np.uint8),4.0)

print "Detecting image"
detector.detect(pb.load_single_band(data_path+"image00.jpg",np.uint8))

print "Number Found = "+str(detector.totalFound())

for i in range(detector.totalFound()):
    print "=========== Found #"+str(i)
    fid_to_cam = detector.getFiducialToCamera(i)
    # print fid_to_cam
    print "Pattern ID = "+str(detector.get_id(i))
    print "Rotation"
    print "  "+str(fid_to_cam.get_rotation())
    print "Translation"
import numpy as np

import pyboof as pb
from pyboof.swing import visualize_matches

# Enable use of memory mapped files for MUCH faster conversion between some python and boofcv data types
pb.init_memmap(5)

# Load two images
image0 = pb.load_single_band("../data/example/stitch/cave_01.jpg", np.uint8)
image1 = pb.load_single_band("../data/example/stitch/cave_02.jpg", np.uint8)

# Set up the SURF fast hessian feature detector.  Reduce the number of features it will detect by putting a limit
# on how close two features can be and the maximum number at each scale
config_fh = pb.ConfigFastHessian()
config_fh.extractRadius = 4
config_fh.maxFeaturesPerScale = 300

# Create the detector and use default for everything else
feature_detector = pb.FactoryDetectDescribe(np.uint8).createSurf(config_detect=config_fh)

# Detect features in the first image
locs0, desc0 = feature_detector.detect(image0)
locs1, desc1 = feature_detector.detect(image1)

print "Detected {:4d} features in image 0".format(len(desc0))
print "         {:4d}             image 1".format(len(desc1))

factory_association = pb.FactoryAssociate()
factory_association.set_score(pb.AssocScoreType.DEFAULT,feature_detector.get_descriptor_type())
associator = factory_association.greedy()
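
# The listing ends after creating the greedy associator. A hedged sketch of the remaining
# steps; the method names set_source/set_destination/associate/get_matches and the
# visualize_matches argument order are assumptions based on the usual pyboof pattern:
associator.set_source(desc0)
associator.set_destination(desc1)
associator.associate()

visualize_matches(image0, image1, locs0, locs1, associator.get_matches())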
Example #11
import pyboof as pb
import numpy as np

original = pb.load_single_band('../../../data/applet/outdoors01.jpg', np.uint8)

gaussian = original.createSameShape()  # useful function which creates a new image of the
mean = original.createSameShape()  # same type and shape as the original

# Apply different types of blur to the image
pb.blur_gaussian(original, gaussian, radius=3)
pb.blur_mean(original, mean, radius=3)

# display the results in a single window as a list
image_list = [(original, "original"), (gaussian, "gaussian"), (mean, "mean")]
pb.swing.show_list(image_list, title="Outputs")
Example #12
import numpy as np

import pyboof as pb

data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path + "intrinsicLeft.xml")

# Load original image and the undistorted image
original = pb.load_single_band(data_path + "left08.jpg", np.uint8)
undistorted = original.createSameShape()

# Remove distortion and show the results
pb.remove_distortion(original, undistorted, intrinsic)
image_list = [(original, "Original"), (undistorted, "Undistorted")]
pb.swing.show_list(image_list, title="Lens Distortion")

input("Press any key to exit")
#!/usr/bin/env python3

import numpy as np

import pyboof as pb
from pyboof.swing import visualize_matches

# Enable use of memory mapped files for MUCH faster conversion between some python and boofcv data types
pb.init_memmap(5)

# Load two images and camera calibration
image0 = pb.load_single_band("../data/example/stereo/mono_wall_01.jpg",
                             np.uint8)
image1 = pb.load_single_band("../data/example/stereo/mono_wall_02.jpg",
                             np.uint8)
intrinsic = pb.CameraPinhole().load(
    "../data/example/calibration/mono/Sony_DSC-HX5V_Chess/intrinsic.yaml")

# Set up the SURF fast hessian feature detector.  Reduce the number of features it will detect by putting a limit
# on how close two features can be and the maximum number at each scale
config_fh = pb.ConfigFastHessian()
config_fh.extractRadius = 4
config_fh.maxFeaturesPerScale = 300

# Create the detector and use default for everything else
feature_detector = pb.FactoryDetectDescribe(
    np.uint8).createSurf(config_detect=config_fh)

# Detect features in the first image
locs0, desc0 = feature_detector.detect(image0)
locs1, desc1 = feature_detector.detect(image1)
import numpy as np
import pyboof as pb

original = pb.load_single_band("../data/example/outdoors01.jpg", np.uint8)

gaussian = original.createSameShape()  # useful function which creates a new image of the
mean = original.createSameShape()  # same type and shape as the original

# Apply different types of blur to the image
pb.blur_gaussian(original, gaussian, radius=3)
pb.blur_mean(original, mean, radius=3)

# display the results in a single window as a list
image_list = [(original, "original"), (gaussian, "gaussian"), (mean, "mean")]
pb.swing.show_list(image_list, title="Outputs")

raw_input("Press any key to exit")
Example #15
import numpy as np

import pyboof as pb

data_path = "../data/example/fiducial/image/examples/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path+"intrinsic.xml")

configFiducial = pb.ConfigFiducialImage()
configThreshold = pb.ConfigThreshold.create_local(pb.ThresholdType.LOCAL_SQUARE,10)

print("Configuring detector")
detector = pb.FactoryFiducial( np.uint8 ).squareImage(configFiducial,configThreshold)
detector.setIntrinsic(intrinsic)
detector.addPattern(pb.load_single_band(data_path+"../patterns/pentarose.png",np.uint8),4.0)
detector.addPattern(pb.load_single_band(data_path+"../patterns/yu.png",np.uint8),4.0)

print("Detecting image")
detector.detect(pb.load_single_band(data_path+"image00.jpg",np.uint8))

print("Number Found = "+str(detector.totalFound()))

for i in range(detector.totalFound()):
    print("=========== Found #"+str(i))
    fid_to_cam = detector.getFiducialToCamera(i)
    # print fid_to_cam
    print("Pattern ID = "+str(detector.get_id(i)))
    print("Rotation")
    print("  "+str(fid_to_cam.get_rotation()))
    print("Translation")
Example #16
import pyboof as pb

import numpy as np

original = pb.load_single_band('../../../data/applet/outdoors01.jpg',np.uint8)

# Let BoofCV decide on the type of image to store the gradient as
deriv_dtype = pb.gradient_dtype(pb.get_dtype(original))

# Declare the gradient images
derivX = pb.create_single_band(original.getWidth(),original.getHeight(),deriv_dtype)
derivY = pb.create_single_band(original.getWidth(),original.getHeight(),deriv_dtype)

# Compute the results for a few operators and visualize
pb.gradient(original,derivX,derivY,pb.GradientType.SOBEL)
buffered_sobel = pb.swing.colorize_gradient(derivX,derivY)

pb.gradient(original,derivX,derivY,pb.GradientType.PREWITT)
buffered_prewitt = pb.swing.colorize_gradient(derivX,derivY)

pb.gradient(original,derivX,derivY,pb.GradientType.THREE)
buffered_three = pb.swing.colorize_gradient(derivX,derivY)

pb.gradient(original,derivX,derivY,pb.GradientType.TWO0)
buffered_two0 = pb.swing.colorize_gradient(derivX,derivY)

pb.gradient(original,derivX,derivY,pb.GradientType.TWO1)
buffered_two1 = pb.swing.colorize_gradient(derivX,derivY)

# display the results in a single window as a list
image_list = [(original,"original"),
Example #17
intrinsic = pb.CameraPinhole()
intrinsic.load(os.path.join(data_path, "intrinsic.yaml"))

# Load a pre-built dictionary
config_marker = pb.load_hamming_marker(pb.HammingDictionary.ARUCO_MIP_25h7)

# Tweak the detector. None can be passed in to use the default detector settings
config_detector = pb.ConfigFiducialHammingDetector()
config_detector.configThreshold.type = pb.ThresholdType.LOCAL_MEAN

print("Creating the detector")
detector = pb.FactoryFiducial(np.uint8).square_hamming(config_marker,
                                                       config_detector)

print("Detecting image")
detector.detect(
    pb.load_single_band(os.path.join(data_path, "image01.jpg"), np.uint8))

print("Number Found = " + str(detector.get_total()))

for i in range(detector.get_total()):
    print("=========== Found #" + str(i))
    fid_to_cam = detector.get_fiducial_to_camera(i)
    print("Pattern ID = " + str(detector.get_id(i)))
    print("Image Location " + str(detector.get_center(i)))
    if detector.is_3d():
        print("Rotation")
        print("  " + str(fid_to_cam.get_rotation()))
        print("Translation")
        print("  " + str(fid_to_cam.get_translation()))
Example #18
#!/usr/bin/env python3

import numpy as np

import pyboof as pb
from pyboof.swing import visualize_lines

# Load an image with strong lines in it
image = pb.load_single_band("../data/example/simple_objects.jpg", np.uint8)
blurred = image.createSameShape()

# Applying a little bit of blur tends to improve the results
pb.blur_gaussian(image, blurred, radius=5)

# There are a few variants of Hough in BoofCV. The variant we will use here uses the image gradient directly.
# This is useful when you want to find the edges of objects. If you have an image with thin black lines and you
# want to find the lines themselves and not their edges, then the binary variant is what you want to use.
config_gradient = pb.ConfigHoughGradient(10)

# Detect the lines using several different variants of Hough line detector
results = []

detector = pb.FactoryDetectLine(
    np.uint8).houghLinePolar(config_hough=config_gradient)
results.append(("Gradient Polar", detector.detect(blurred)))
detector = pb.FactoryDetectLine(
    np.uint8).houghLineFoot(config_hough=config_gradient)
results.append(("Gradient Foot", detector.detect(blurred)))

# Use swing to visualize the results
visualize_lines(image, results)
Example #19
#!/usr/bin/env python3

import numpy as np
import pyboof as pb

# Detects all the QR Codes in the image and prints their message and location
data_path = "../data/example/fiducial/qrcode/image03.jpg"

detector = pb.FactoryFiducial(np.uint8).qrcode()

image = pb.load_single_band(data_path, np.uint8)

detector.detect(image)

print("Detected a total of {} QR Codes".format(len(detector.detections)))

for qr in detector.detections:
    print("Message: " + qr.message)
    print("     at: " + str(qr.bounds))
Example #20
#!/usr/bin/env python3

import numpy as np

import pyboof as pb

original = pb.load_single_band(
    '../data/example/fiducial/image/examples/image00.jpg', np.uint8)

binary = pb.create_single_band(original.getWidth(), original.getHeight(),
                               np.uint8)

algorithms = []

factory = pb.FactoryThresholdBinary(np.uint8)

algorithms.append(("localGaussian", factory.localGaussian(region_width=11)))
algorithms.append(("localSauvola", factory.localSauvola(region_width=11)))
algorithms.append(("localMean", factory.localMean(region_width=11)))
algorithms.append(("localNick", factory.localNick(region_width=11)))
# algorithms.append(("localOtsu"  ,factory.localOtsu(region_width=11))) # This can be slow
algorithms.append(("blockMinMax", factory.blockMinMax(region_width=11)))
algorithms.append(("blockMean", factory.blockMean(region_width=11)))
algorithms.append(("blockOtsu", factory.blockOtsu(region_width=11)))
algorithms.append(("globalEntropy", factory.globalEntropy()))
algorithms.append(("globalOtsu", factory.globalOtsu()))
algorithms.append(("globalLi", factory.globalLi()))
algorithms.append(("globalHuang", factory.globalHuang()))
algorithms.append(("globalFixed", factory.globalFixed(threshold=100)))

image_list = [(original, "Original")]
Example #21
data_path = "../data/example/fiducial/binary/"

# Load the camera parameters
intrinsic = pb.CameraPinhole()
intrinsic.load(os.path.join(data_path, "intrinsic.yaml"))

configFiducial = pb.ConfigFiducialBinary(target_width=0.3)
configThreshold = pb.ConfigThreshold.create_local(pb.ThresholdType.LOCAL_MEAN, 10)

print("Configuring detector")
detector = pb.FactoryFiducial(np.uint8).square_binary(configFiducial, configThreshold)
# Without intrinsics only pattern ID and pixel location can be found
detector.set_intrinsic(intrinsic)

print("Detecting image")
detector.detect(pb.load_single_band(os.path.join(data_path, "image0000.jpg"), np.uint8))

print("Number Found = "+str(detector.get_total()))

for i in range(detector.get_total()):
    print("=========== Found #{}".format(i))
    fid_to_cam = detector.get_fiducial_to_camera(i)
    print("Pattern ID = "+str(detector.get_id(i)))
    print("Image Location " + str(detector.get_center(i)))
    if detector.is_3d():
        print("Rotation")
        print("  "+str(fid_to_cam.get_rotation()))
        print("Translation")
        print("  "+str(fid_to_cam.get_translation()))
Example #22
import numpy as np

import pyboof as pb

data_path = "../../../data/applet/fiducial/image/examples/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path + "intrinsic.xml")

config = pb.ConfigFiducialImage()

print "Configuring detector"
detector = pb.FactoryFiducial(np.uint8).squareRobust(config, 6)
detector.setIntrinsic(intrinsic)
detector.addPattern(
    pb.load_single_band(data_path + "../patterns/chicken.png", np.uint8), 4.0)
detector.addPattern(
    pb.load_single_band(data_path + "../patterns/yu.png", np.uint8), 4.0)

print "Detecting image"
detector.detect(pb.load_single_band(data_path + "image01.jpg", np.uint8))

print "Number Found = " + str(detector.totalFound())

for i in range(detector.totalFound()):
    print "=========== Found " + str(i)
    fid_to_cam = detector.getFiducialToCamera(i)
    # print fid_to_cam
    print "Rotation"
    print "  " + str(fid_to_cam.get_rotation())
    print "Translation"
import numpy as np

import matplotlib.pyplot as plt
import pyboof as pb

# Enable use of memory mapped files for MUCH faster conversion between some python and boofcv data types
pb.init_memmap(5)

# Load two images
image0 = pb.load_single_band("../data/example/stereo/chair01_left.jpg",
                             np.uint8)
image1 = pb.load_single_band("../data/example/stereo/chair01_right.jpg",
                             np.uint8)

# Load stereo rectification
stereo_param = pb.StereoParameters()
stereo_param.load(
    "../data/example/calibration/stereo/Bumblebee2_Chess/stereo.xml")

# Rectify and undistort the images
model_rectifier = pb.StereoRectification(stereo_param.left, stereo_param.right,
                                         stereo_param.right_to_left)
model_rectifier.all_inside_left()

distort_left = model_rectifier.create_distortion(
    pb.ImageType(image0.getImageType()), True)
distort_right = model_rectifier.create_distortion(
    pb.ImageType(image0.getImageType()), False)

rect0 = image0.createSameShape()
rect1 = image1.createSameShape()
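
# The listing is cut off here; the fuller stereo rectification example later in this
# collection continues by applying the two distortion models to the images:
distort_left.apply(image0, rect0)
distort_right.apply(image1, rect1)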
Example #24
    [0, 20, 0],  # y-axis
    [20, 20, 0]  # (1,1,0) point
], dtype=np.float64)

#               Get Camera matrix
mtx, dist = calibrate_camera.get_calibration_results()

#               Initialize axis 3D object points
axis = np.float32([[20, 0, 0],  # x-axis
                   [0, 20, 0],  # y-axis
                   [0, 0, 20]  # z-axis
                   ]).reshape(-1, 3)

#               Get image
detector = pb.FactoryFiducial(np.uint8).qrcode()
image = pb.load_single_band(filename, np.uint8)

#               Detect QR codes in image
detector.detect(image)
num_founded_qr = len(detector.detections)
print("Detected a total of {} QR Codes".format(len(detector.detections)))
for qr in detector.detections:
    print("Message: "+qr.message)
    print("     at: "+str(qr.bounds))

#               Construct 2D image points

object_points_1 = detector.detections[0].bounds
object_points_2 = detector.detections[1].bounds

A_imgpts = np.array([
import numpy as np

import pyboof as pb

data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

# Load the camera parameters
intrinsic = pb.Intrinsic()
intrinsic.load_xml(data_path+"intrinsicLeft.xml")

# Load original image and the undistorted image
original = pb.load_single_band(data_path+"left08.jpg", np.uint8)
undistorted = original.createSameShape()

# Remove distortion and show the results
pb.remove_distortion(original, undistorted, intrinsic)
image_list = [(original,"Original"), (undistorted,"Undistorted")]
pb.swing.show_list(image_list, title="Lens Distortion")

raw_input("Press any key to exit")
import numpy as np

import pyboof as pb

original = pb.load_single_band('../data/example/fiducial/image/examples/image00.jpg',np.uint8)

binary = pb.create_single_band(original.getWidth(),original.getHeight(),np.uint8)

algorithms = []

factory = pb.FactoryThresholdBinary(np.uint8)

algorithms.append(("localGaussian",factory.localGaussian(radius=5)))
algorithms.append(("localSauvola" ,factory.localSauvola(radius=5)))
algorithms.append(("localSquare"  ,factory.localSquare(radius=5)))
algorithms.append(("globalEntropy",factory.globalEntropy()))
algorithms.append(("globalOtsu"   ,factory.globalOtsu()))
algorithms.append(("globalFixed"  ,factory.globalFixed(threshold=100)))

image_list = [(original, "Original")]

for a in algorithms:
    a[1].process(original, binary)
    buffered_binary = pb.swing.render_binary(binary)
    image_list.append((buffered_binary,a[0]))

pb.swing.show_list(image_list, title="Binary Thresholding")

raw_input("Press any key to exit")
Example #27
#!/usr/bin/env python3

import numpy as np

import pyboof as pb
from pyboof.swing import visualize_matches

# Enable use of memory mapped files for MUCH faster conversion between some python and boofcv data types
pb.init_memmap()

# Load two images
image0 = pb.load_single_band("../data/example/stitch/cave_01.jpg", np.uint8)
image1 = pb.load_single_band("../data/example/stitch/cave_02.jpg", np.uint8)

# Set up the SURF fast hessian feature detector.  Reduce the number of features it will detect by putting a limit
# on how close two features can be and the maximum number at each scale
config_fh = pb.ConfigFastHessian()
config_fh.extractRadius = 4
config_fh.maxFeaturesPerScale = 300

# Create the detector and use default for everything else
feature_detector = pb.FactoryDetectDescribe(np.uint8).createSurf(config_detect=config_fh)

# Detect features in the first image
locs0, desc0 = feature_detector.detect(image0)
locs1, desc1 = feature_detector.detect(image1)

print("Detected {:4d} features in image 0".format(len(desc0)))
print("         {:4d}             image 1".format(len(desc1)))

Example #28
#!/usr/bin/env python3

import numpy as np
import os
import pyboof as pb

data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

# Load the camera parameters
intrinsic = pb.CameraPinhole()
intrinsic.load(os.path.join(data_path, "intrinsicLeft.yaml"))

# Load original image and the undistorted image
original = pb.load_single_band(os.path.join(data_path, "left08.jpg"), np.uint8)
undistorted = original.createSameShape()

# Remove distortion and show the results
pb.remove_distortion(original, undistorted, intrinsic)
image_list = [(original, "Original"), (undistorted, "Undistorted")]
pb.swing.show_list(image_list, title="Lens Distortion")

input("Press any key to exit")
import numpy as np

import cv2
import pyboof as pb

# Enable use of memory mapped files for MUCH faster conversion of images between java and python
pb.init_memmap(5)

image_path = '../data/example/outdoors01.jpg'

# Can load an image using OpenCV then convert it into BoofCV
ndarray_img = cv2.imread(image_path, 0)

boof_cv = pb.ndarray_to_boof(ndarray_img)

# Can also use BoofCV to load the image directly
boof_gray = pb.load_single_band(image_path, np.uint8)
boof_color = pb.load_planar(image_path, np.uint8)

# Let's display all 3 of them in Java
# display the results in a single window as a list
image_list = [(boof_cv, "OpenCV"), (boof_gray, "Gray Scale"),
              (boof_color, "Color")]

pb.swing.show_list(image_list, title="Images")

input("Press any key to exit")
import numpy as np

import cv2
import pyboof as pb

# Enable use of memory mapped files for MUCH faster conversion of images between java and python
pb.init_memmap(5)

image_path = '../data/example/outdoors01.jpg'

# Can load an image using OpenCV then convert it into BoofCV
ndarray_img = cv2.imread(image_path,0)

boof_cv = pb.ndarray_to_boof(ndarray_img)

# Can also use BoofCV to load the image directly
boof_gray = pb.load_single_band(image_path,np.uint8)
boof_color = pb.load_planar(image_path,np.uint8)

# Let's display all 3 of them in Java
# display the results in a single window as a list
image_list = [(boof_cv,"OpenCV"),
              (boof_gray,"Gray Scale"),
              (boof_color,"Color")]

pb.swing.show_list(image_list,title="Images")

raw_input("Press any key to exit")
Example #31
import pyboof as pb
import numpy as np
import os
import glob

# Demonstration of how to calibrate a camera using a pinhole model
data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

print("Configuring and creating a chessboard detector")
config_grid = pb.ConfigGridDimen(num_rows=5, num_cols=7, square_width=0.3)
detector = pb.FactoryFiducialCalibration.chessboardX(config_grid)

print("Detecting calibration targets")
observations = []
for file in glob.glob(os.path.join(data_path, "left*.jpg")):
    image = pb.load_single_band(file, np.float32)
    detector.detect(image)
    if detector.detected_points:
        print("success " + file)
        o = {
            "width": image.getWidth(),
            "height": image.getHeight(),
            "pixels": detector.detected_points
        }
        observations.append(o)
    else:
        print("failed " + file)

print("Solving for intrinsic parameters")

intrinsic, errors = pb.calibrate_brown(observations,
Example #32
import glob
import os

import numpy as np
import pyboof as pb

# Demonstration of how to calibrate a stereo camera
data_path = "../data/example/calibration/stereo/Zed_ecocheck/"

print("Configuring and creating a chessboard detector")
config_ecocheck = pb.ecocheck_parse("9x7n1", square_size=0.3)
detector = pb.FactoryFiducialCalibration.ecocheck(config_ecocheck)

print("Detecting calibration targets")
files_left = sorted(glob.glob(os.path.join(data_path, "left*.jpg")))
files_right = sorted(glob.glob(os.path.join(data_path, "right*.jpg")))

observations_left = []
observations_right = []
for file_left, file_right in zip(files_left, files_right):
    # left image
    image = pb.load_single_band(file_left, np.float32)
    detector.detect(image)
    o = {"width": image.getWidth(), "height": image.getHeight()}
    if detector.detected_markers:
        print("success " + file_left)
        o["pixels"] = detector.detected_markers[0]["landmarks"]
    else:
        o["pixels"] = []
    observations_left.append(o)

    # right image
    image = pb.load_single_band(file_right, np.float32)
    detector.detect(image)
    o = {"width": image.getWidth(), "height": image.getHeight()}
    if detector.detected_markers:
        print("success " + file_right)
Example #33
#!/usr/bin/env python3

import pyboof as pb
import numpy as np
import os
import cv2

# Demonstration of how to detect points in a calibration target
data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

print("Configuring and creating a chessboard detector")
config_grid = pb.ConfigGridDimen(num_rows=5, num_cols=7, square_width=0.3)
detector = pb.FactoryFiducialCalibration.chessboardB(config_grid)

print("Detecting image")
image = pb.load_single_band(os.path.join(data_path, "left01.jpg"), np.float32)
detector.detect(image)

print("Detected points {}".format(len(detector.detected_points)))

# Convert it into a color image for visualization purposes
ndimage = pb.boof_to_ndarray(image).astype(np.uint8)
ndimage = cv2.cvtColor(ndimage, cv2.COLOR_GRAY2RGB)

# Draw green dots with red outlines
for x in detector.detected_points:
    # x[0] is index of the control point
    # x[1] and x[2] is the (x,y) pixel coordinate, sub-pixel precision
    cv2.circle(ndimage, (int(x[1]), int(x[2])), 7, (0, 0, 255), -1)
    cv2.circle(ndimage, (int(x[1]), int(x[2])), 5, (0, 255, 0), -1)
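
# The listing ends right after drawing the detected points; showing the annotated image with
# OpenCV is the natural last step (a hedged sketch):
cv2.imshow("Detected calibration points", ndimage)
cv2.waitKey(0)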
import numpy as np

import matplotlib.pyplot as plt
import pyboof as pb

# Enable use of memory mapped files for MUCH faster conversion between some python and boofcv data types
pb.init_memmap(5)

# Load two images
image0 = pb.load_single_band("../data/example/stereo/chair01_left.jpg", np.uint8)
image1 = pb.load_single_band("../data/example/stereo/chair01_right.jpg", np.uint8)

# Load stereo rectification
stereo_param = pb.StereoParameters()
stereo_param.load("../data/example/calibration/stereo/Bumblebee2_Chess/stereo.xml")

# Rectify and undistort the images
model_rectifier = pb.StereoRectification(stereo_param.left, stereo_param.right, stereo_param.right_to_left)
model_rectifier.all_inside_left()

distort_left = model_rectifier.create_distortion(pb.ImageType(image0.getImageType()), True)
distort_right = model_rectifier.create_distortion(pb.ImageType(image0.getImageType()), False)

rect0 = image0.createSameShape()
rect1 = image1.createSameShape()

distort_left.apply(image0, rect0)
distort_right.apply(image1, rect1)

# Configure and compute disparity
config = pb.ConfigStereoDisparity()