Example #1
import glob
import os

import scipy.io as sio


def processSpotImages(base_path, color_index=None):
    """Process a set of mat file images.

    The images should contain light spots at different locations.
    """

    if color_index is None:
        colors = slice(3)
    else:
        colors = slice(color_index, color_index + 1)

    path = os.path.join(base_path, "*.mat")
    imgs_paths = glob.glob(path)
    measurements = []
    for img_path in imgs_paths:
        img = sio.loadmat(img_path)["img"]
        try:
            measurements.append(
                [findSpot(c) for c in raw2RGB(img)[colors]]
            )
        except Exception:
            #
            # Skip images in which no spot was detected.
            #
            pass

    #
    # Arrange the measurements as a list of colors.
    #
    measurements = list(zip(*measurements))

    return measurements
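
A minimal usage sketch for processSpotImages, assuming the module-level helpers it relies on (findSpot, raw2RGB) are available; the directory path below is a hypothetical placeholder.

# Hypothetical call: collect per-color spot measurements from a directory
# of .mat captures (the path is a placeholder) and report how many spots
# were measured per color channel.
per_color = processSpotImages("vignetting_calibration/images")
for name, channel in zip(("R", "G", "B"), per_color):
    print("{}: {} spot measurements".format(name, len(channel)))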
Example #2
import numpy as np


def measureRGB(raw_array):
    """Measure mean RGB values in a raw array captured of an integrating sphere."""

    R, G, B = raw2RGB(raw_array)

    #
    # Average only bright (above 75% of the channel maximum) yet
    # unsaturated pixels in each color channel.
    #
    masks = [(C > 0.75 * C.max()) & (C < 255) for C in (R, G, B)]
    r, g, b = [np.mean(C[mask]) for C, mask in zip((R, G, B), masks)]

    return r, g, b
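
A short sketch of calling measureRGB, assuming raw2RGB splits a Bayer raw frame into R, G and B planes; the flat synthetic frame below only illustrates the expected input, not a real capture.

# Hypothetical input: a flat synthetic "raw" frame standing in for an
# integrating-sphere capture.
raw_frame = np.full((1200, 1600), 200.0)
r, g, b = measureRGB(raw_frame)
print("Mean RGB: {:.1f}, {:.1f}, {:.1f}".format(r, g, b))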
Example #3
    def update_3dplot(self):

        color = self.colors_list

        #
        # Load the raw spot measurements of both calibration sessions.
        #
        with open(os.path.join(base_path1, 'measurements.pkl'), 'rb') as f:
            measurements1 = cPickle.load(f)
        with open(os.path.join(base_path2, 'measurements.pkl'), 'rb') as f:
            measurements2 = cPickle.load(f)

        #
        # Fit a vignetting model to each measurement set.
        #
        vc1 = VignettingCalibration.readMeasurements(
            base_path1,
            polynomial_degree=self.polynomial_degree,
            residual_threshold=self.residual_threshold)
        vc2 = VignettingCalibration.readMeasurements(
            base_path2,
            polynomial_degree=self.polynomial_degree,
            residual_threshold=self.residual_threshold)

        #
        # Drop failed measurements and split into coordinates and values.
        #
        x1, y1, z1 = zip(*[a for a in measurements1 if a[0] is not None])
        x2, y2, z2 = zip(*[a for a in measurements2 if a[0] is not None])

        for scene, (x, y, z), vc in zip([self.scene_3D1, self.scene_3D2],
                                        ((x1, y1, z1), (x2, y2, z2)),
                                        (vc1, vc2)):
            mlab.clf(figure=scene.mayavi_scene)

            #
            # Plot the measured values of the selected color as spheres.
            #
            zs = np.array(z)[..., COLOR_INDICES[color]]
            zs = 255 * zs / zs.max()
            mlab.points3d(x,
                          y,
                          zs,
                          mode='sphere',
                          scale_mode='scalar',
                          scale_factor=20,
                          color=(1, 1, 1),
                          figure=scene.mayavi_scene)

            #
            # Overlay the fitted vignetting model as a semi-transparent surface.
            #
            surface = np.ones((1200, 1600))
            corrected = raw2RGB(255 / vc.applyVignetting(surface))

            mlab.surf(corrected[COLOR_INDICES[color]].T,
                      extent=(0, 1600, 0, 1200, 0, 255),
                      opacity=0.5)
            mlab.outline(color=(0, 0, 0),
                         extent=(0, 1600, 0, 1200, 0, 255),
                         figure=scene.mayavi_scene)
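
A small sketch of the data layout this method unpacks: measurements.pkl is assumed to hold a list of (x, y, value) tuples, with failed captures stored as (None, None, None), as in the calibration script of Example #5.

# Assumed structure of a measurements list: valid entries are
# (x, y, [r, g, b]); failed captures are (None, None, None).
measurements = [
    (100, 200, [180.0, 170.0, 160.0]),
    (None, None, None),
    (300, 400, [90.0, 85.0, 80.0]),
]

# Drop the failures and split into coordinate / value sequences,
# exactly as update_3dplot does for each session.
x, y, z = zip(*[a for a in measurements if a[0] is not None])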
Example #4
import cv2
import numpy as np


def findSpot(img, threshold=5):
    """Calculate spot value in image.

    Returns (x, y, [r, g, b]): the center of the spot (its first image
    moment) and the mean color measured at the spot.
    """

    #
    # Reject images whose brightest value is too low to contain a spot.
    #
    half_max = img.max() / 2
    if half_max < threshold:
        raise Exception('Spot intensity too low: {}'.format(half_max))

    #
    # Calculate a spot mask.
    #
    kernel = np.ones((3, 3), np.uint8)
    mask = (img > threshold)
    mask = cv2.dilate(mask.astype(np.uint8), kernel)
    mask = cv2.erode(mask.astype(np.uint8), kernel, iterations=2)

    #
    # Image values at the spot.
    #
    img_filt = img.astype(float) * mask
    total = img_filt.sum()
    if total == 0:
        raise Exception('Empty Image')

    #
    # Calculate the first moment (center) of the spot.
    #
    Y, X = np.indices(img.shape)
    x = (X * img_filt).sum() / total
    y = (Y * img_filt).sum() / total

    return int(x), int(y), [meanColor(c) for c in raw2RGB(img_filt)]
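
A hedged sketch of running findSpot on a synthetic image; it assumes raw2RGB and meanColor from the same module are importable, and the Gaussian blob is purely illustrative of a dark-subtracted camera frame.

# Hypothetical input: a bright Gaussian blob centered at (800, 600).
Y, X = np.indices((1200, 1600))
spot_img = 200.0 * np.exp(-((X - 800) ** 2 + (Y - 600) ** 2) / (2.0 * 50.0 ** 2))

x, y, rgb = findSpot(spot_img, threshold=5)
print("Spot center: ({}, {}), mean color: {}".format(x, y, rgb))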
Example #5
def main():
    """Run the calibration of the cameras."""

    #
    # Initialize the calibration setup.
    #
    CameraNetwork.initialize_logger(log_level=logging.DEBUG)

    import oceanoptics
    spec = oceanoptics.get_a_random_spectrometer()

    p = Gimbal(com="COM3")

    #
    # Put a breakpoint here if you want to adjust the focus.
    # Note: You will have to stop the program before you can open the IDS cockpit.
    #
    p.move(90, 90)

    cv2.namedWindow("image", flags=cv2.WINDOW_AUTOSIZE)
    cam = IDSCamera(callback=capture_callback)

    #
    # All the results of the calibration are stored on disk.
    #
    date_sign = datetime.now().strftime("%Y_%m_%d")
    results_path = os.path.join('vignetting_calibration',
                                cam.info['serial_num'], date_sign)
    safe_mkdirs(results_path)
    safe_mkdirs(os.path.join(results_path, 'images'))

    #
    # Calibration data is stored in repo.
    #
    data_path = pkg_resources.resource_filename(__name__,
                                                '../data/calibration/')
    data_path = os.path.join(data_path, cam.info['serial_num'], date_sign)
    safe_mkdirs(data_path)

    #
    # Capture settings.
    #
    settings = {
        "exposure_us": GEOMETRIC_EXPOSURE,
        "gain_db": 0,
        "gain_boost": False,
        "color_mode": gs.COLOR_RGB
    }

    #
    # Store the exposure time
    #
    with open(os.path.join(results_path, 'settings.pkl'), 'wb') as f:
        cPickle.dump(settings, f)

    ############################################################################
    # Perform geometric calibration
    ############################################################################
    if DO_GEOMETRIC_CALIBRATION:
        #imgs = sorted(glob.glob(os.path.join(results_path, 'geometric/*.jpg')))
        #imgs = [cv2.imread(img) for img in imgs]
        safe_mkdirs(os.path.join(results_path, 'geometric'))

        X_grid, Y_grid = np.meshgrid(np.linspace(GEOMETRIC_XMIN,
                                                 GEOMETRIC_XMAX,
                                                 GEOMETRIC_STEPS),
                                     np.linspace(GEOMETRIC_YMIN,
                                                 GEOMETRIC_YMAX,
                                                 GEOMETRIC_STEPS),
                                     indexing='xy')
        X_grid = X_grid.astype(np.int32)
        Y_grid = Y_grid.astype(np.int32)

        imgs = []
        img_index = 0
        raw_input("Put the chessboard and press any key")
        for _ in range(5):
            for (x, y) in zip(np.random.choice(X_grid.ravel(), size=20),
                              np.random.choice(Y_grid.ravel(), size=20)):
                logging.debug("Moved gimbal to position: ({})".format(
                    (int(x), int(y))))
                p.move(int(x), int(y))
                time.sleep(GEOMETRIC_SLEEP_TIME)

                img, _, _ = cam.capture(settings, frames_num=1)

                #
                # Save image for debugging the calibration process.
                #
                cv2.imwrite(
                    os.path.join(results_path, 'geometric',
                                 'img_{:03}.jpg'.format(img_index)), img)

                img_index += 1
                imgs.append(img)

        #
        # Use the fisheye model
        #
        fe = fisheye.FishEye(nx=NX, ny=NY, verbose=True)

        rms, K, D, rvecs, tvecs, mask = fe.calibrate(
            imgs=imgs,
            show_imgs=True,
            calibration_flags=cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC +
            cv2.fisheye.CALIB_FIX_SKEW,
            return_mask=True)

        if SHOW_REPROJECTION:
            cv2.namedWindow("Reprojected img", cv2.WINDOW_AUTOSIZE)
            vec_cnt = 0
            safe_mkdirs(os.path.join(results_path, 'reprojection'))
            for img_index, m in enumerate(mask):
                if not m:
                    continue
                rep_img = cv2.drawChessboardCorners(
                    imgs[img_index].copy(), (NX, NY),
                    fe.projectPoints(rvec=rvecs[vec_cnt], tvec=tvecs[vec_cnt]),
                    1)
                cv2.imshow("Reprojected img", rep_img)
                cv2.waitKey(500)
                cv2.imwrite(
                    os.path.join(results_path, 'reprojection',
                                 'img_{:03}.jpg'.format(img_index)), rep_img)
                vec_cnt += 1

            cv2.destroyAllWindows()

        normalization = Normalization(1001, FisheyeProxy(fe))
        img_normalized = normalization.normalize(imgs[0])
        plt.imshow(img_normalized, cmap='gray')
        plt.show()

        #
        # Save the geometric calibration
        #
        fe.save(os.path.join(results_path, 'fisheye.pkl'))
        fe.save(os.path.join(data_path, 'fisheye.pkl'))
    else:
        fe = fisheye.load_model(os.path.join(results_path, 'fisheye.pkl'))

    settings["color_mode"] = gs.COLOR_RAW
    settings["exposure_us"] = VIGNETTING_EXPOSURE

    ############################################################################
    # Measure dark noise
    ############################################################################
    if DO_BLACK_IMG:
        p.move(90, 90)
        time.sleep(GEOMETRIC_SLEEP_TIME)
        raw_input("Turn off the lights and press any key")
        winsound.Beep(5000, 500)

        black_img = np.mean(cam.capture(settings, frames_num=10)[0], axis=2)
        winsound.Beep(5000, 500)
        np.save(os.path.join(results_path, 'black_img.npy'), black_img)
    else:
        black_img = np.load(os.path.join(results_path, 'black_img.npy'))

    #
    # Prompt the user to set the COLIBRI LED power.
    #
    raw_input("Set COLIBRI LEDS: {}, and press any key.".format([
        (k, v) for k, v in LED_POWER.items()
    ]))

    ############################################################################
    # Measure the spectrum
    ############################################################################
    spec.integration_time(0.3)
    measurements = []
    for i in range(10):
        s = spec.spectrum()
        measurements.append(s[1])

    with open(os.path.join(results_path, 'spec.pkl'), 'wb') as f:
        cPickle.dump((s[0], np.mean(measurements, axis=0)), f)
    with open(os.path.join(data_path, 'spec.pkl'), 'wb') as f:
        cPickle.dump((s[0], np.mean(measurements, axis=0)), f)

    #
    # Verify no saturation.
    #
    p.move(90, 90)
    time.sleep(GEOMETRIC_SLEEP_TIME)
    img, exp, gain = cam.capture(settings, frames_num=1)
    if img.max() == 255:
        raise Exception('Saturation')

    print("Maximal color values: {}".format([c.max() for c in raw2RGB(img)]))

    ############################################################################
    # Perform vignetting calibration.
    ############################################################################
    X_grid, Y_grid = np.meshgrid(np.linspace(0, 180, VIGNETTING_STEPS),
                                 np.linspace(0, 180, VIGNETTING_STEPS),
                                 indexing='xy')
    X_grid = X_grid.astype(np.int32)
    Y_grid = Y_grid.astype(np.int32)

    measurements = []
    for i, (x, y) in enumerate(zip(X_grid.ravel(), Y_grid.ravel())):
        sys.stdout.write('x={}, y={}...\t'.format(x, y))

        p.move(x, y)
        time.sleep(VIGNETTING_SLEEP_TIME)
        img = np.mean(cam.capture(settings, frames_num=10)[0], axis=2)
        winsound.Beep(8000, 500)
        try:
            measurement = findSpot(np.clip(img - black_img, 0, 255))
        except Exception:
            print('FAIL')
            print(traceback.format_exc())
            measurement = None, None, None

        measurements.append(measurement)
        print(measurement)

        #
        # Store the measurement image.
        #
        img_path = os.path.join(results_path, 'images',
                                'img_{:03}.mat'.format(i))
        sio.savemat(img_path, {'img': img}, do_compression=True)

    img = np.zeros(shape=(1200, 1600, 3))

    for x, y, val in measurements:
        if val is None:
            continue
        img[y, x, ...] = val

    #
    # Save the results
    #
    sio.savemat(os.path.join(results_path, 'results.mat'), {'img': img},
                do_compression=True)
    with open(os.path.join(results_path, 'measurements.pkl'), 'wb') as f:
        cPickle.dump(measurements, f)

    #
    # Calculate Vignetting correction.
    #
    vc = VignettingCalibration.readMeasurements(results_path)
    vc.save(os.path.join(results_path, gs.VIGNETTING_SETTINGS_FILENAME))
    vc.save(os.path.join(data_path, gs.VIGNETTING_SETTINGS_FILENAME))
    print("The STD Vignetting Error per color is: {}".format(vc._stds))

    #
    # Visualize the vignetting.
    #
    normalization = Normalization(gs.DEFAULT_NORMALIZATION_SIZE,
                                  FisheyeProxy(fe))
    d = normalization.normalize(np.dstack(raw2RGB(vc.ratio)))
    plt.imshow(d)
    plt.show()
    sio.savemat(os.path.join(results_path, "vignetting_norm.mat"),
                dict(img=d),
                do_compression=True)
    cv2.destroyAllWindows()
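
A hedged entry-point stub for running the calibration directly; the original module may already define one outside the excerpt shown above.

# Hypothetical entry point; kept separate from main() itself.
if __name__ == "__main__":
    main()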