Example #1
0
 def test_parameterSet(self):
     # exercise the default / override / fallback chain of camera parameters
     camera = ct.Camera(ct.RectilinearProjection(), ct.SpatialOrientation())

     # a value set on the defaults shines through to the camera itself
     camera.defaults.elevation_m = 99
     assert camera.elevation_m == 99
     assert camera.defaults.elevation_m == 99

     # an explicit value overrides the default without modifying it
     camera.elevation_m = 10
     assert camera.elevation_m == 10
     assert camera.defaults.elevation_m == 99

     # resetting to None falls back to the default again
     camera.elevation_m = None
     assert camera.elevation_m == 99
     assert camera.defaults.elevation_m == 99

     # test if the parameter sets are linked properly:
     # writing through the orientation is visible on the camera and vice versa
     camera.orientation.elevation_m = 900
     assert camera.elevation_m == 900
     camera.elevation_m = 800
     assert camera.orientation.elevation_m == 800

     # attributes that are not camera parameters behave like plain attributes
     camera.foo = 99
     assert camera.foo == 99
     camera.defaults.foo = 99
     assert camera.defaults.foo == 99

     # unknown attributes raise AttributeError on every parameter container
     with self.assertRaises(AttributeError):
         camera.bla
     with self.assertRaises(AttributeError):
         camera.parameters.bla
     with self.assertRaises(AttributeError):
         camera.projection.bla
     with self.assertRaises(AttributeError):
         camera.orientation.bla
     with self.assertRaises(AttributeError):
         camera.defaults.bla
Example #2
0
def startDemonstratorGUI():
    """Launch the Qt demonstrator window showing the 9-cube scene."""
    # camera with a fixed focal length matching the demo image resolution
    camera = ct.Camera(
        ct.RectilinearProjection(focallength_px=3860, image=[4608, 2592]))

    application = QtWidgets.QApplication(sys.argv)
    main_window = Window(camera, Scene9Cubes(camera))
    main_window.show()
    application.exec_()
def get_camera_params(project_path, location, display=False):
    """Compute camera parameters using yolo inputs and save it"""
    # folder layout (project_path is assumed to end with a separator)
    yolo_folder = project_path + 'models/yolo_coco/'
    image_folder = project_path + 'video_scraping/' + location + '/'

    # use the most recent frame in the folder for initialization
    latest_frame_name = sorted(os.listdir(image_folder))[-1]
    frame = plt.imread(image_folder + latest_frame_name)

    # intrinsic camera parameters
    focal_mm = 6.2
    sensor_size_mm = (6.17, 4.55)  # (width, height) in mm
    frame_size_px = (frame.shape[1], frame.shape[0])  # (width, height) in pixel

    # build the camera model from the intrinsics
    camera = ct.Camera(
        ct.RectilinearProjection(focallength_mm=focal_mm,
                                 sensor=sensor_size_mm,
                                 image=frame_size_px))

    # calibrate from detected people: average height 1.7 m, std 0.3 m
    feet, heads = yolo_collection(yolo_folder, image_folder)
    camera.addObjectHeightInformation(feet, heads, 1.7, 0.3)

    # fit the extrinsic parameters with a Metropolis sampler
    trace = camera.metropolis(
        [
            # ct.FitParameter("heading_deg", lower=-180, upper=180, value=0),
            # ct.FitParameter("roll_deg", lower=-180, upper=180, value=0),
            ct.FitParameter("elevation_m", lower=0, upper=100, value=5),
            ct.FitParameter("tilt_deg", lower=0, upper=180, value=45)
        ],
        iterations=1e4)

    # persist the fitted camera parameters
    camera.save(project_path + 'camera_params/' + location +
                '_camera_params.json')

    if display:
        # show the sampler trace ...
        camera.plotTrace()
        plt.tight_layout()
        plt.show()

        # ... then the fit overlaid on the frame next to a top view
        plt.figure('Calibration information', figsize=(15, 10))
        plt.subplot(1, 2, 1)
        camera.plotFitInformation(frame)
        plt.legend()

        plt.subplot(1, 2, 2)
        camera.getTopViewOfImage(frame, [-10, 10, 0, 20], do_plot=True)
        plt.xlabel("x position in m")
        plt.ylabel("y position in m")
        plt.show()
    def test_init_cam(self):
        """Initialize a camera from explicit intrinsic parameters."""
        # intrinsic camera parameters
        f = 6.2  # focal length in mm
        sensor_size = (6.17, 4.55)  # (width, height) in mm
        image_size = (3264, 2448)  # (width, height) in pixel

        # initialize the camera
        # FIX: the width/height indices were swapped (index 1 was passed as
        # width and index 0 as height), transposing both the image and the
        # sensor dimensions. (6.17, 4.55) and (3264, 2448) are both
        # (width, height) pairs — they share the same 4:3 aspect ratio, and
        # this matches the convention used elsewhere in this file.
        cam = ct.Camera(
            ct.RectilinearProjection(focallength_mm=f,
                                     image_width_px=image_size[0],
                                     image_height_px=image_size[1],
                                     sensor_width_mm=sensor_size[0],
                                     sensor_height_mm=sensor_size[1]),
            ct.SpatialOrientation())
 def test_lens(self, lens, proj, k):
     # instantiate the lens; NoDistortion takes no distortion coefficient
     lens_instance = lens() if lens == ct.NoDistortion else lens(k)
     cam = ct.Camera(projection=proj, lens=lens_instance)

     # sample 100 pixels along the horizontal center line of the image
     ys = [proj.image_height_px * 0.5] * 100
     xs = np.linspace(0, 1, 100) * proj.image_width_px
     image_points = np.round(np.array([xs, ys]).T).astype(int)

     # distorting and undistorting must land back on the same pixels
     distorted = cam.lens.distortedFromImage(image_points)
     recovered = np.round(cam.lens.imageFromDistorted(distorted)).astype(int)
     np.testing.assert_almost_equal(
         recovered,
         image_points,
         0,
         err_msg="Transforming from distorted to undistorted image fails.")
def camera_down_with_world_points(draw,
                                  projection=projection(),
                                  n=st.one_of(st.integers(1, 1000),
                                              st.just(1))):
    """Hypothesis strategy: a camera plus either one point or an (n, 3) array."""
    camera = ct.Camera(projection=draw(projection),
                       orientation=ct.SpatialOrientation())

    count = draw(n)
    if count == 1:
        # a single point as a plain 3-tuple of floats in [-100, 100]
        points = draw(
            st.tuples(st.floats(-100, 100), st.floats(-100, 100),
                      st.floats(-100, 100)))
    else:
        # an (n, 3) float array of coordinates in [-100, 100]
        points = draw(
            st_np.arrays(dtype="float",
                         shape=st.tuples(st.just(count), st.just(3)),
                         elements=st.floats(-100, 100)))
    return camera, points
Example #7
0
    def test_fitCamParametersFromObjects(self):
        """Fit camera extrinsics from object heights, horizon, GPS and landmarks.

        Exercises the different `add*Information` input shapes (full arrays,
        single rows, per-object values) and both fitting entry points
        (`metropolis` and `fit`).
        """
        # setup the camera
        im = np.ones((2592, 4608, 3))
        camera = ct.Camera(
            ct.RectilinearProjection(focallength_px=3863.64, image=im))

        # pixel positions of detected people: feet[i] and heads[i] belong to
        # the same person
        feet = np.array([[1968.73191418, 2291.89125757],
                         [1650.27266115, 2189.75370951],
                         [1234.42623164, 2300.56639535],
                         [927.4853119, 2098.87724083],
                         [3200.40162013, 1846.79042709],
                         [3385.32781138, 1690.86859965],
                         [2366.55011031, 1446.05084045],
                         [1785.68269333, 1399.83787022],
                         [889.30386193, 1508.92532749],
                         [4107.26569943, 2268.17045783],
                         [4271.86353701, 1889.93651518],
                         [4007.93773879, 1615.08452509],
                         [2755.63028039, 1976.00345458],
                         [3356.54352228, 2220.47263494],
                         [407.60113016, 1933.74694958],
                         [1192.78987735, 1811.07247163],
                         [1622.31086201, 1707.77946355],
                         [2416.53943619, 1775.68148688],
                         [2056.81514201, 1946.4146027],
                         [2945.35225814, 1617.28314118],
                         [1018.41322935, 1499.63957113],
                         [1224.2470045, 1509.87120351],
                         [1591.81599888, 1532.33339856],
                         [1701.6226147, 1481.58276189],
                         [1954.61833888, 1405.49985098],
                         [2112.99329583, 1485.54970652],
                         [2523.54106057, 1534.87590467],
                         [2911.95610793, 1448.87104305],
                         [3330.54617013, 1551.64321531],
                         [2418.21276457, 1541.28499777],
                         [1771.1651859, 1792.70568482],
                         [1859.30409241, 1904.01744759],
                         [2805.02878512, 1881.00463747],
                         [3138.67003071, 1821.05082989],
                         [3355.45215983, 1910.47345815],
                         [734.28038607, 1815.69614796],
                         [978.36733356, 1896.36507827],
                         [1350.63202232, 1979.38798787],
                         [3650.89052382, 1901.06620751],
                         [3555.47087822, 2332.50027861],
                         [865.71688784, 1662.27834394],
                         [1115.89438493, 1664.09341647],
                         [1558.93825646, 1671.02167477],
                         [1783.86089289, 1679.33599881],
                         [2491.01579305, 1707.84219953],
                         [3531.26955813, 1729.08486338],
                         [3539.6318973, 1776.5766387],
                         [4150.36451427, 1840.90968707],
                         [2112.48684812, 1714.78834459],
                         [2234.65444134, 1756.17059266]])
        heads = np.array([[1968.45971142, 2238.81171866],
                          [1650.27266115, 2142.33767714],
                          [1233.79698528, 2244.77321846],
                          [927.4853119, 2052.2539967],
                          [3199.94718145, 1803.46727222],
                          [3385.32781138, 1662.23146061],
                          [2366.63609066, 1423.52398752],
                          [1785.68269333, 1380.17615549],
                          [889.30386193, 1484.13026407],
                          [4107.73533808, 2212.98791584],
                          [4271.86353701, 1852.85753597],
                          [4007.93773879, 1586.36656606],
                          [2755.89171994, 1938.22544024],
                          [3355.91105749, 2162.91833832],
                          [407.60113016, 1893.63300333],
                          [1191.97371829, 1777.60995028],
                          [1622.11915337, 1678.63975025],
                          [2416.31761434, 1743.29549618],
                          [2056.67597009, 1910.09072955],
                          [2945.35225814, 1587.22557592],
                          [1018.69818061, 1476.70099517],
                          [1224.55272475, 1490.30510731],
                          [1591.81599888, 1510.72308329],
                          [1701.45016126, 1460.88834824],
                          [1954.734384, 1385.54008964],
                          [2113.14023137, 1465.41953732],
                          [2523.54106057, 1512.33125811],
                          [2912.08384338, 1428.56110628],
                          [3330.40769371, 1527.40984208],
                          [2418.21276457, 1517.88006678],
                          [1770.94524662, 1761.25436746],
                          [1859.30409241, 1867.88794433],
                          [2804.69006305, 1845.10009734],
                          [3138.33130864, 1788.53351052],
                          [3355.45215983, 1873.21402971],
                          [734.49504829, 1780.27688131],
                          [978.1022294, 1853.9484135],
                          [1350.32991656, 1938.60371039],
                          [3650.89052382, 1863.97713098],
                          [3556.44897343, 2278.37901052],
                          [865.41437575, 1633.53969555],
                          [1115.59187284, 1640.49747358],
                          [1558.06918395, 1647.12218082],
                          [1783.86089289, 1652.74740383],
                          [2491.20950909, 1677.42878081],
                          [3531.11177814, 1696.89774656],
                          [3539.47411732, 1745.49398176],
                          [4150.01023142, 1803.35570469],
                          [2112.84669376, 1684.92115685],
                          [2234.65444134, 1724.86402238]])

        # height information with a scalar height, a single pair, and a list
        # of per-object heights (covers the different accepted shapes)
        camera.addObjectHeightInformation(feet, heads, 0.75, 0.03)
        camera.addObjectHeightInformation(feet[0], heads[0], 0.75, 0.03)
        camera.addObjectHeightInformation(feet[:2], heads[:2], [0.75, 0.7],
                                          0.03)

        # horizon line given as an array of points and as a single point
        horizon = np.array([[418.2195998, 880.253216],
                            [3062.54424509, 820.94125636]])
        camera.addHorizonInformation(horizon, uncertainty=10)
        camera.addHorizonInformation(horizon[0], uncertainty=10)

        # GPS position accepted as a string, a parsed tuple, or lat/lon/elev
        camera.setGPSpos("66°39'53.4\"S  140°00'34.8\"")
        gps = ct.gpsFromString("66°39'53.4\"S  140°00'34.8\"")
        camera.setGPSpos(gps)
        camera.setGPSpos(gps[0], gps[1], 0)

        # landmarks: pixel positions paired with GPS coordinates converted to
        # camera space; [3, 3, 5] is the positional uncertainty in meters
        lm_points_px = np.array([[2091.300935, 892.072126],
                                 [2935.904577, 824.364956]])
        lm_points_gps = ct.gpsFromString([
            ("66°39'56.12862''S  140°01'20.39562''", 13.769),
            ("66°39'58.73922''S  140°01'09.55709''", 21.143)
        ])
        lm_points_space = camera.spaceFromGPS(lm_points_gps)

        camera.addLandmarkInformation(lm_points_px, lm_points_space, [3, 3, 5])
        camera.addLandmarkInformation(lm_points_px[0], lm_points_space[0],
                                      [3, 3, 5])

        # Metropolis sampling of the four extrinsic parameters
        trace = camera.metropolis([
            ct.FitParameter("elevation_m", lower=0, upper=100, value=20),
            ct.FitParameter("tilt_deg", lower=0, upper=180, value=80),
            ct.FitParameter("heading_deg", lower=-180, upper=180, value=-77),
            ct.FitParameter("roll_deg", lower=-180, upper=180, value=0)
        ],
                                  iterations=1e3)
        print(trace)

        # deterministic fit of the same parameters
        camera.fit([
            ct.FitParameter("elevation_m", lower=0, upper=100, value=20),
            ct.FitParameter("tilt_deg", lower=0, upper=180, value=80),
            ct.FitParameter("heading_deg", lower=-180, upper=180, value=-77),
            ct.FitParameter("roll_deg", lower=-180, upper=180, value=0)
        ],
                   iterations=1e3)
        """
        camera.fridge([
            ct.FitParameter("elevation_m", lower=0, upper=100, value=20),
            ct.FitParameter("tilt_deg", lower=0, upper=180, value=80),
            ct.FitParameter("heading_deg", lower=-180, upper=180, value=-77),
            ct.FitParameter("roll_deg", lower=-180, upper=180, value=0)
        ], iterations=1e2)
        """

        # smoke-test the plotting helpers on the fit result
        camera.plotTrace()
        camera.plotFitInformation(im)
Example #8
0
    def updateCameraParameters(self):
        """
        Update the camera dictionary from the (potentially user-modified)
        GUI fields, persist the values as options, and rebuild the
        ``ct.Camera`` used for the calculation.
        """

        # update current cam parameters from the GUI line edits
        self.cam = dotdict()
        self.cam.fov_h_deg = getFloat(self.leFOV_horizontal.text())
        self.cam.fov_v_deg = getFloat(self.leFOV_vertical.text())
        self.cam.sensor_w_mm = getFloat(self.leSensor_width.text())
        self.cam.sensor_h_mm = getFloat(self.leSensor_height.text())
        self.cam.focallength_mm = getFloat(self.leFocallength.text())
        self.cam.img_w_px = getInteger(self.leImage_width.text())
        self.cam.img_h_px = getInteger(self.leImage_height.text())
        # optical center = image center plus the user-entered offset
        self.cam.center_x_px = self.cam.img_w_px / 2. + getFloat(
            self.leOffsetX.text())
        self.cam.center_y_px = self.cam.img_h_px / 2. + getFloat(
            self.leOffsetY.text())

        # orientation angles
        self.cam.heading_deg = getFloat(self.leCamPan.text())
        self.cam.tilt_deg = getFloat(self.leCamTilt.text())
        self.cam.roll_deg = getFloat(self.leCamRoll.text())

        # camera position in space
        self.cam.elevation_m = getFloat(self.leCamElevation.text())
        self.cam.pos_x_m = getFloat(self.leCamPosX.text())
        self.cam.pos_y_m = getFloat(self.leCamPosY.text())

        self.cam.projection = self.cbProjection.itemText(
            self.cbProjection.currentIndex())

        self.cam.gps_lat = getFloat(self.leCamLat.text())
        self.cam.gps_lon = getFloat(self.leCamLon.text())

        # if the sensor dimensions were left empty, derive them from the FOV
        if self.cam.sensor_h_mm is None or self.cam.sensor_w_mm is None:
            self.calcSensorDimensionsFromFOV()

        # save parameters as options to DB
        self.setOption('DM_focallength_mm', self.cam.focallength_mm)
        self.setOption('DM_fov_h_deg', self.cam.fov_h_deg)
        self.setOption('DM_fov_v_deg', self.cam.fov_v_deg)
        self.setOption('DM_sensor_w_mm', self.cam.sensor_w_mm)
        self.setOption('DM_sensor_h_mm', self.cam.sensor_h_mm)
        self.setOption('DM_img_w_px', self.cam.img_w_px)
        self.setOption('DM_img_h_px', self.cam.img_h_px)
        self.setOption('DM_offset_x_px', getFloat(self.leOffsetX.text()))
        self.setOption('DM_offset_y_px', getFloat(self.leOffsetY.text()))

        print("Camera:")
        print(json.dumps(self.cam, indent=4, sort_keys=True))

        # elevations of the camera and of the reference plane
        self.position = dotdict()
        self.position.cam_elevation = getFloat(self.leCamElevation.text())
        self.position.plane_elevation = getFloat(self.lePlaneElevation.text())

        # save parameters to options
        self.setOption('DM_cam_elevation', self.position.cam_elevation)
        self.setOption('DM_plane_elevation', self.position.plane_elevation)

        print("Position:")
        print(json.dumps(self.position, indent=4, sort_keys=True))

        print(self.cam)

        # update camera parameters
        # self.camera = ct.CameraTransform(self.cam.focallength_mm, [self.cam.sensor_w_mm, self.cam.sensor_h_mm],[self.cam.img_w_px, self.cam.img_h_px])
        orientation = ct.SpatialOrientation(heading_deg=self.cam.heading_deg,
                                            tilt_deg=self.cam.tilt_deg,
                                            roll_deg=self.cam.roll_deg,
                                            elevation_m=self.cam.elevation_m,
                                            pos_x_m=self.cam.pos_x_m,
                                            pos_y_m=self.cam.pos_y_m)
        # select the projection model named in the combo box
        if self.cam.projection == "Cylindrical":
            projection = ct.CylindricalProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)
        elif self.cam.projection == "Equirectangular":
            projection = ct.EquirectangularProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)

        else:  # default to rectilinear projection
            print("Defaulting to rectiliniear")
            projection = ct.RectilinearProjection(
                focallength_mm=self.cam.focallength_mm,
                image_width_px=self.cam.img_w_px,
                image_height_px=self.cam.img_h_px,
                sensor_width_mm=self.cam.sensor_w_mm,
                sensor_height_mm=self.cam.sensor_h_mm,
                center_x_px=self.cam.center_x_px,
                center_y_px=self.cam.center_y_px)
        self.camera = ct.Camera(orientation=orientation, projection=projection)
        self.camera.setGPSpos(lat=self.cam.gps_lat,
                              lon=self.cam.gps_lon,
                              elevation=self.cam.elevation_m)
    def __init__(self, camera_params):
        """Set up ROS I/O and per-camera projection models.

        camera_params is a dict providing at least: 'n_cameras',
        'camera_type' ('rgb', 'dvs' or 'both'), 'update_rate',
        'camera_resolution' (a (width, height) tuple), and
        'focal_length_px'.
        """

        self.bridge = CvBridge()
        # one 'last_image_XX' attribute per camera holding the latest frame
        for camera_id in range(camera_params['n_cameras']):
            setattr(self, 'last_image_%02i' % (camera_id, ),
                    [[None, None, None]])
        self.last_model_states = ModelStates()
        self.spawned_objects = []

        # Camera IDs and corresponding variables holding images
        self.camera_type = camera_params['camera_type']
        self.update_rate = camera_params['update_rate']
        # RGB image shape: resolution plus 3 color channels
        self.image_dims = camera_params['camera_resolution'] + (3, )
        # map "00", "01", ... to the matching last_image_XX container
        self.camera_dict = {'%02i' % (camera_id,): getattr(self, 'last_image_%02i'\
            % (camera_id,)) for camera_id in range(camera_params['n_cameras'])}

        # ROS Subscribers, Publishers, Services
        for camera_id in range(camera_params['n_cameras']):
            if self.camera_type in ['rgb', 'both']:
                img_callback_rgb = self.__image_callback_wrapper(
                    camera_id, 'rgb')
                topic_name_rgb = '/robot/camera_rgb_%02i' % (camera_id, )
                setattr(
                    self, '__image_rgb_%02i_sub' % (camera_id, ),
                    rospy.Subscriber(topic_name_rgb,
                                     Image,
                                     img_callback_rgb,
                                     queue_size=1))
            if self.camera_type in ['dvs', 'both']:
                img_callback_dvs = self.__image_callback_wrapper(
                    camera_id, 'dvs')
                topic_name_dvs = '/robot/camera_dvs_%02i/events' % (
                    camera_id, )
                setattr(
                    self, '__image_dvs_%02i_sub' % (camera_id, ),
                    rospy.Subscriber(topic_name_dvs,
                                     EventArray,
                                     img_callback_dvs,
                                     queue_size=1))

        self.__set_model_state_pub = rospy.Publisher('/gazebo/set_model_state',
                                                     ModelState,
                                                     queue_size=1)

        rospy.wait_for_service('gazebo/get_model_state', 10.0)
        self.__get_model_state_srv = rospy.ServiceProxy(
            'gazebo/get_model_state', GetModelState)

        # 3D voxel to 2D camera pixel projection parameters (useful for segmentation dataset)
        self.cam_transform = []
        for camera_id in range(camera_params['n_cameras']):
            cam_pose = self.get_object_pose('camera_%02i' % (camera_id))
            cam_pos = cam_pose.position
            cam_ori = cam_pose.orientation
            # quaternion -> Euler angles, converted from radians to degrees
            roll, pitch, yaw = euler_from_quaternion(
                [cam_ori.x, cam_ori.y, cam_ori.z, cam_ori.w])
            (roll, pitch, yaw) = (angle_in_rad * 180 / np.pi
                                  for angle_in_rad in (roll, pitch, yaw))

            cam_projection = ct.RectilinearProjection(
                focallength_px=camera_params['focal_length_px'],
                image=camera_params['camera_resolution'])
            # the 90-degree offsets map Gazebo's pitch/yaw convention onto
            # cameratransform's tilt/heading convention
            cam_orientation = ct.SpatialOrientation(pos_x_m=cam_pos.x,
                                                    pos_y_m=cam_pos.y,
                                                    elevation_m=cam_pos.z,
                                                    roll_deg=roll,
                                                    tilt_deg=90 - pitch,
                                                    heading_deg=90 - yaw)
            # distortion-free Brown model (all coefficients zero)
            cam_lens = ct.BrownLensDistortion(k1=0.0,
                                              k2=0.0,
                                              k3=0.0,
                                              projection=cam_projection)
            self.cam_transform.append(
                ct.Camera(projection=cam_projection,
                          orientation=cam_orientation,
                          lens=cam_lens))

        # NOTE(review): init_node is called only after the subscribers and
        # publisher above are created — confirm this ordering is intended
        # (rospy normally expects init_node before any registration).
        rospy.init_node('cdp4_data_collection')
        rospy.wait_for_service('/gazebo/get_world_properties')

        self.__physics_state = rospy.ServiceProxy(
            '/gazebo/get_world_properties', GetWorldProperties)

        # block until the simulation clock has advanced past startup
        while self.__physics_state().sim_time < 2:
            print('Waiting for simulation to be started')
            rospy.sleep(2)

        self.__path_to_models = os.getenv('HOME') + '/.gazebo/models/'

        rospy.wait_for_service('gazebo/set_model_state', 10.0)
        self.__set_model_state_srv = rospy.ServiceProxy(
            'gazebo/set_model_state', SetModelState)

        rospy.wait_for_service('gazebo/spawn_sdf_entity')
        self.__spawn_model_srv = rospy.ServiceProxy('/gazebo/spawn_sdf_entity',
                                                    SpawnEntity)

        rospy.wait_for_service('gazebo/set_visual_properties')
        self.__change_color_srv = rospy.ServiceProxy(
            'gazebo/set_visual_properties', SetVisualProperties)
Example #10
0
def camera(draw, projection=projection(), orientation=orientation()):
    """Hypothesis strategy: a camera with a drawn projection and orientation."""
    drawn_projection = draw(projection)
    drawn_orientation = draw(orientation)
    return ct.Camera(projection=drawn_projection,
                     orientation=drawn_orientation)
    def __init__(self):
        """Build the camera-calibrator widget: image list, image views, fit pane."""
        QtWidgets.QWidget.__init__(self)

        # the camera model to be calibrated (rectilinear + Brown distortion)
        self.camera = ct.Camera(ct.RectilinearProjection(), lens=ct.BrownLensDistortion())
        # the list of loaded images
        self.images = []

        # widget layout and elements
        self.setMinimumWidth(1300)
        self.setMinimumHeight(400)
        self.setWindowTitle("Camera Calibrator")

        # add the main layout
        main_layout = QtWidgets.QHBoxLayout(self)
        self.setLayout(main_layout)

        """ the left pane with the image list """
        layout = QtWidgets.QVBoxLayout(self)
        main_layout.addLayout(layout)

        # the table listing the loaded image files and their status
        self.tableWidget = MyQTable()
        layout.addWidget(self.tableWidget)

        self.tableWidget.itemSelectionChanged.connect(self.imageSelected)
        self.tableWidget.cellDoubleClicked.connect(self.imageDoubleClicked)
        self.tableWidget.setColumnCount(3)
        self.tableWidget.setHorizontalHeaderLabels(["File", "Processed", "Use"])
        self.tableWidget.setMinimumWidth(350)

        # the button layout
        self.button_layout = QtWidgets.QHBoxLayout()
        layout.addLayout(self.button_layout)

        # the batch loader: pick image files to add to the list
        self.input_filename = QtShortCuts.QInputFilename(self.button_layout, None, os.getcwd(), button_text="Add Images",
                                                         file_type="Images (*.jpg *.png)", existing=True, just_button=True)
        self.input_filename.valueChanged.connect(self.loadBatch)

        # clear button
        self.button_clear = QtWidgets.QPushButton("Clear Images")
        self.button_clear.clicked.connect(self.clearImages)
        self.button_layout.addWidget(self.button_clear)

        # process button
        self.button_start = QtWidgets.QPushButton("Process")
        self.button_start.clicked.connect(self.processImages)
        self.button_layout.addWidget(self.button_start)

        """ the middle pane with the image display """

        layout = QtWidgets.QVBoxLayout(self)
        main_layout.addLayout(layout)

        # view/scene setup (top view: original image)
        self.view = QExtendedGraphicsView(dropTarget=self)
        layout.addWidget(self.view)
        self.origin = self.view.origin

        self.preview_pixMapItem = QtWidgets.QGraphicsPixmapItem(self.origin)

        # second view below (bottom view: corrected image)
        self.view2 = QExtendedGraphicsView(dropTarget=self)
        layout.addWidget(self.view2)
        self.origin2 = self.view2.origin

        self.preview_pixMapItem2 = QtWidgets.QGraphicsPixmapItem(self.origin2)

        # toggle between original and distortion-corrected display
        self.input_display_undistorted = QtShortCuts.QInputBool(layout, "Display Corrected (D)", True)
        self.input_display_undistorted.valueChanged.connect(self.displayImage)

        self.progessBar = QtWidgets.QProgressBar()
        layout.addWidget(self.progessBar)

        """ the right pane with the fit data """

        layout = QtWidgets.QVBoxLayout(self)
        main_layout.addLayout(layout)

        # calibration flags: which parameters the fit is allowed to vary
        self.input_fix_center = QtShortCuts.QInputBool(layout, "Fit Center")
        self.input_fix_aspect = QtShortCuts.QInputBool(layout, "Fit Aspect Ratio")
        self.input_fix_k1 = QtShortCuts.QInputBool(layout, "Fit K1", True)
        self.input_fix_k2 = QtShortCuts.QInputBool(layout, "Fit K2", True)
        self.input_fix_k3 = QtShortCuts.QInputBool(layout, "Fit K3", True)

        # start calibration button
        self.button_fit = QtWidgets.QPushButton("Fit Calibration")
        self.button_fit.clicked.connect(self.fitCalibration)
        layout.addWidget(self.button_fit)

        # a label to display the results
        self.calibration_result = QtWidgets.QLabel()
        layout.addWidget(self.calibration_result)

        # a stretch
        layout.addStretch()

        # the plot of the result
        self.plot = QtShortCuts.MatplotlibWidget()
        layout.addWidget(self.plot)

        # the button layout
        self.button_layout2 = QtWidgets.QHBoxLayout()
        layout.addLayout(self.button_layout2)

        # save button (disabled until a calibration exists)
        self.button_save = QtShortCuts.QInputFilename(self.button_layout2, None, os.getcwd(),
                                                         button_text="Save Calibration",
                                                         file_type="Text File (*.txt)", existing=False,
                                                         just_button=True)
        self.button_save.valueChanged.connect(self.saveCalibration)
        self.button_save.setDisabled(True)

        # load button
        self.button_load = QtShortCuts.QInputFilename(self.button_layout2, None, os.getcwd(),
                                                      button_text="Load Calibration",
                                                      file_type="Text File (*.txt)", existing=True,
                                                      just_button=True)
        self.button_load.valueChanged.connect(self.loadCalibration)