Example #1
# Demonstration of how to detect points in a calibration target
import os

import numpy as np
import cv2
import pyboof as pb

data_path = "../data/example/calibration/stereo/Bumblebee2_Chess/"

print("Configuring and creating a chessboard detector")
config_grid = pb.ConfigGridDimen(num_rows=5, num_cols=7, square_width=0.3)
detector = pb.FactoryFiducialCalibration.chessboardB(config_grid)

print("Detecting image")
image = pb.load_single_band(os.path.join(data_path, "left01.jpg"), np.float32)
detector.detect(image)

print("Detected points {}".format(len(detector.detected_points)))

# Convert it into a color image for visualization purposes
ndimage = pb.boof_to_ndarray(image).astype(np.uint8)
ndimage = cv2.cvtColor(ndimage, cv2.COLOR_GRAY2RGB)

# Draw green dots with red outlines
for x in detector.detected_points:
    # x[0] is the index of the control point
    # x[1] and x[2] are the (x, y) pixel coordinates with sub-pixel precision
    cv2.circle(ndimage, (int(x[1]), int(x[2])), 7, (0, 0, 255), -1)
    cv2.circle(ndimage, (int(x[1]), int(x[2])), 5, (0, 255, 0), -1)

cv2.imshow('Detected Control Points', ndimage)

print("Select Window and Press Any Key")
while cv2.getWindowProperty('Detected Control Points', 0) >= 0:
    if cv2.waitKey(50) != -1:
        break
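
If the corner locations are needed for further processing (for example, your own calibration or pose estimation code), they can be pulled into NumPy arrays. A minimal sketch, assuming only the (index, x, y) layout documented in the loop above:

points = np.array([(p[1], p[2]) for p in detector.detected_points], dtype=np.float64)
indexes = np.array([p[0] for p in detector.detected_points], dtype=np.int32)
print("points shape: {}".format(points.shape))  # (N, 2) sub-pixel pixel coordinates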
Example #2
    # Capture the sequence frame-by-frame; stop when the stream ends
    ret, frame = cap.read()
    if not ret:
        break

    # Convert it into a boofcv image
    boof_color = pb.ndarray_to_boof(frame, boof_color)
    # Convert it into the image type required by the tracker
    pb.convert_boof_image(boof_color, image_input)

    # Stitch the current frame into the mosaic
    time0 = time.time() * 1000.0
    if not video_mosaic.process(image_input):
        print("mosaic failed!")
        video_mosaic.reset()
        continue
    time1 = time.time() * 1000.0

    # Get the mosaic image and display the results
    boof_mosaic = video_mosaic.get_stitched_image()
    ndarray_mosaic = pb.boof_to_ndarray(boof_mosaic)

    cv2.imshow("Video Mosaic", ndarray_mosaic)

    print("mosaic: {:6.2f} ms".format(time1 - time0))

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
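
The fragment above begins inside the capture loop; cap, boof_color, image_input, and video_mosaic are created earlier in the original script and are not shown here. The PyBoof mosaic setup is omitted rather than guessed at, but the surrounding OpenCV capture scaffolding it assumes looks roughly like this sketch:

import cv2

cap = cv2.VideoCapture(0)  # default camera; a file path or stream URL works the same way
if not cap.isOpened():
    raise RuntimeError("Could not open the video source")

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # ... per-frame conversion and video_mosaic.process() from the fragment above ...

    cv2.imshow("Frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()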
Example #3
distort_left = model_rectifier.create_distortion(
    pb.ImageType(image0.getImageType()), True)
distort_right = model_rectifier.create_distortion(
    pb.ImageType(image0.getImageType()), False)

rect0 = image0.createSameShape()
rect1 = image1.createSameShape()

distort_left.apply(image0, rect0)
distort_right.apply(image1, rect1)

# Configure and compute disparity
config = pb.ConfigStereoDisparity()
config.minDisparity = 10
config.maxDisparity = 60

factory = pb.FactoryStereoDisparity(np.uint8)

disparityAlg = factory.region_wta(config)

disparityAlg.process(rect0, rect1)

disparity_image = pb.boof_to_ndarray(disparityAlg.get_disparity_image())
# The disparity image uses an offset format: pixel value = disparity - minDisparity,
# and anything larger than (maxDisparity - minDisparity) marks an invalid pixel.
# This is a legacy of 8-bit disparity images.
disparity_image += 10                                  # add minDisparity back
disparity_image[disparity_image > 60] = float('nan')  # values above maxDisparity are invalid

plt.imshow(disparity_image)
plt.show()

print("Done!")
Example #4
transform = pb.NarrowToWideFovPtoP(narrow_model=model_pinhole,
                                   wide_model=model_fisheye)

image_fisheye = pb.load_planar(os.path.join(data_path, "front_hike.jpg"),
                               np.uint8)
image_pinhole = image_fisheye.createNew(model_pinhole.width,
                                        model_pinhole.height)

image_distorter = transform.create_image_distort(
    pb.ImageType(image_fisheye.getImageType()))

image_distorter.apply(image_fisheye, image_pinhole)

# Make the fisheye image smaller so that it's easier to manage
small_fisheye = cv2.resize(pb.boof_to_ndarray(image_fisheye),
                           None,
                           fx=0.5,
                           fy=0.5,
                           interpolation=cv2.INTER_LINEAR)

cv2.imshow("Fisheye", small_fisheye[..., [2, 1, 0]])
cv2.imshow("Pinhole", pb.boof_to_ndarray(image_pinhole)[..., [2, 1, 0]])

# Rotate the camera so that it's focused on the path behind and rotated to appear "up"
transform.set_rotation_wide_to_narrow(
    transforms3d.euler.euler2mat(-0.1, -0.4, -1.7, axes='sxyz'))
image_distorter.apply(image_fisheye, image_pinhole)
cv2.imshow("Pinhole Rotated",
           pb.boof_to_ndarray(image_pinhole)[..., [2, 1, 0]])
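
Because the example reuses the same image_distorter after calling set_rotation_wide_to_narrow(), the pinhole view can be re-rendered for any orientation. A small sketch that sweeps one Euler angle to pan the virtual pinhole camera across the fisheye image (the angle range is arbitrary):

for angle in np.linspace(-1.0, 1.0, 9):
    transform.set_rotation_wide_to_narrow(
        transforms3d.euler.euler2mat(0, angle, 0, axes='sxyz'))
    image_distorter.apply(image_fisheye, image_pinhole)

    view = pb.boof_to_ndarray(image_pinhole)[..., [2, 1, 0]]  # RGB -> BGR for display
    cv2.imshow("Pinhole Sweep", view)
    if cv2.waitKey(200) & 0xFF == ord('q'):
        break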