Example #1
    def getTargetPoseGuess(self, timestamp, cameras, baselines_HL=[]):
        #go through all cameras that see this target at the given time
        #and take the one with the most target points
        camids = list()
        numcorners = list()
        for cam_id in self.obs_db.getCamIdsAtTimestamp(timestamp):
            camids.append(cam_id)
            numcorners.append(
                len(self.obs_db.getCornerIdsAtTime(timestamp, cam_id)))

        #get the pnp solution of the cam that sees most target corners
        max_idx = numcorners.index(max(numcorners))
        cam_id_max = camids[max_idx]

        #solve the pnp problem
        camera_geometry = cameras[cam_id_max].geometry
        success, T_t_cN = camera_geometry.estimateTransformation(
            self.obs_db.getObservationAtTime(timestamp, cam_id_max))

        if not success:
            sm.logWarn(
                "getTargetPoseGuess: solvePnP failed with solution: {0}".
                format(T_t_cN))

        #transform it back to cam0 (T_t_cN --> T_t_c0)
        T_cN_c0 = sm.Transformation()

        for baseline_HL in baselines_HL[0:cam_id_max]:
            T_cN_c0 = baseline_HL * T_cN_c0

        T_t_c0 = T_t_cN * T_cN_c0

        return T_t_c0
Example #2
    def getTargetPoseGuess(self, timestamp, cameras, baselines_HL=[]):
        #go through all cameras that see this target at the given time
        #and take the one with the most target points        
        camids = list()
        numcorners = list()
        for cam_id in self.obs_db.getCamIdsAtTimestamp(timestamp):
            camids.append(cam_id)
            numcorners.append( len(self.obs_db.getCornerIdsAtTime(timestamp, cam_id)) )

        #get the pnp solution of the cam that sees most target corners
        max_idx = numcorners.index(max(numcorners))
        cam_id_max = camids[max_idx]        
        
        #solve the pnp problem
        camera_geometry = cameras[cam_id_max].geometry
        success, T_t_cN = camera_geometry.estimateTransformation(self.obs_db.getObservationAtTime(timestamp, cam_id_max))
               
        if not success:
            sm.logWarn("getTargetPoseGuess: solvePnP failed with solution: {0}".format(T_t_cN))
        
        #transform it back to cam0 (T_t_cN --> T_t_c0)
        T_cN_c0 = sm.Transformation()
        for baseline_HL in baselines_HL[0:cam_id_max]:
            T_cN_c0 = baseline_HL * T_cN_c0
        
        T_t_c0 = T_t_cN * T_cN_c0
        
        return T_t_c0
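
Both variants above express the PnP solution in the cam0 frame by chaining the camera-to-camera baselines: T_t_c0 = T_t_cN * T_cN_c0. Below is a minimal sketch of that chaining with plain numpy 4x4 homogeneous matrices instead of sm.Transformation; all poses are made-up values for illustration.

import numpy as np

def make_T(R, t):
    # build a 4x4 homogeneous transform from a 3x3 rotation and a translation
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T

# hypothetical baselines of a 3-camera chain (identity rotations for brevity)
baseline_1_0 = make_T(np.eye(3), np.array([0.1, 0.0, 0.0]))  # T_c1_c0
baseline_2_1 = make_T(np.eye(3), np.array([0.1, 0.0, 0.0]))  # T_c2_c1

# pose of the target as seen from cam2, e.g. from a PnP solution
T_t_c2 = make_T(np.eye(3), np.array([0.0, 0.0, 1.0]))

# chain the baselines up to cam2, exactly as the loop above does
T_c2_c0 = np.eye(4)
for baseline in [baseline_1_0, baseline_2_1]:
    T_c2_c0 = baseline @ T_c2_c0

# map the target pose back into the cam0 frame
T_t_c0 = T_t_c2 @ T_c2_c0
print(T_t_c0)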
Example #3
    def __generateExtrinsicsInitialGuess(self):
        """Estimate the pose of the camera with a PnP solver. Call after initializing the intrinsics"""
        # estimate and set T_t_c in the observations
        for idx, observation in enumerate(self.__observations):
            (success, T_t_c) = self.__camera.estimateTransformation(observation)
            if (success):
                observation.set_T_t_c(T_t_c)
            else:
            sm.logWarn("Could not estimate T_t_c for observation at index {0}".format(idx))

        return
Example #4
    def __generateExtrinsicsInitialGuess(self):
        """Estimate the pose of the camera with a PnP solver. Call after initializing the intrinsics"""
        # estimate and set T_t_c in the observations
        for idx, observation in enumerate(self.__observations):
            (success, T_t_c) = self.__camera.estimateTransformation(observation)
            if (success):
                observation.set_T_t_c(T_t_c)
            else:
                sm.logWarn("Could not estimate T_t_c for observation at index {0}".format(idx))

        return
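
estimateTransformation wraps a PnP solve for the target pose. As a standalone illustration of the same idea, here is a sketch built on OpenCV's cv2.solvePnP with a synthetic planar target; the intrinsics, ground-truth pose, and corner layout are invented, and this is not the camera model used by the snippets above.

import numpy as np
import cv2

# a hypothetical pinhole intrinsic matrix with zero distortion
K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
dist = np.zeros(5)

# planar target corners in the target frame (z = 0), like a calibration board
object_points = np.array([[0.0, 0.0, 0.0],
                          [0.1, 0.0, 0.0],
                          [0.1, 0.1, 0.0],
                          [0.0, 0.1, 0.0]])

# synthesize measurements by projecting with a known ground-truth pose
rvec_gt = np.array([0.1, -0.2, 0.05])
tvec_gt = np.array([0.02, -0.01, 0.5])
image_points, _ = cv2.projectPoints(object_points, rvec_gt, tvec_gt, K, dist)

# solve PnP: recover T_c_t (target frame -> camera frame) from the 2D-3D pairs
success, rvec, tvec = cv2.solvePnP(object_points, image_points, K, dist)
R, _ = cv2.Rodrigues(rvec)
T_c_t = np.eye(4)
T_c_t[:3, :3] = R
T_c_t[:3, 3] = tvec.ravel()

# invert to obtain T_t_c, matching the convention of the snippets above
T_t_c = np.linalg.inv(T_c_t)
print(success, np.round(T_t_c, 3))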
Example #5
    def truncateIndicesFromTime(self, indices, bag_from_to):
        #get the timestamps
        timestamps = list()
        for idx in self.indices:
            topic, data, stamp = self.bag._read_message(
                self.index[idx].position)
            timestamp = data.header.stamp.secs + data.header.stamp.nsecs / 1.0e9
            timestamps.append(timestamp)

        bagstart = min(timestamps)
        baglength = max(timestamps) - bagstart

        #some value checking
        if bag_from_to[0] >= bag_from_to[1]:
            raise RuntimeError(
                "Bag start time ({0}) must be smaller than the end time ({1}).".
                format(bag_from_to[0], bag_from_to[1]))
        if bag_from_to[0] < 0.0:
        sm.logWarn("Bag start time of {0} s is smaller than 0".format(
            bag_from_to[0]))
        if bag_from_to[1] > baglength:
            sm.logWarn(
                "Bag end time of {0} s is bigger than the total length of {1} s"
                .format(bag_from_to[1], baglength))

        #find the valid timestamps
        valid_indices = []
        for idx, timestamp in enumerate(timestamps):
            if timestamp >= (bagstart + bag_from_to[0]) and timestamp <= (
                    bagstart + bag_from_to[1]):
                valid_indices.append(idx)
        sm.logWarn("BagImageDatasetReader: truncated {0} / {1} images.".format(
            len(indices) - len(valid_indices), len(indices)))
        return valid_indices
Example #6
  def truncateIndicesFromTime(self, indices, bag_from_to):
    # get the timestamps
    timestamps = list()
    for idx in self.indices:
      topic, data, stamp = self.bag._read_message(self.index[idx].position)
      timestamp = data.header.stamp.secs + data.header.stamp.nsecs / 1.0e9
      timestamps.append(timestamp)

    bagstart = min(timestamps)
    baglength = max(timestamps) - bagstart

    # some value checking
    if bag_from_to[0] >= bag_from_to[1]:
      raise RuntimeError("Bag start time ({0}) must be smaller than the end time ({1}).".format(bag_from_to[0], bag_from_to[1]))
    if bag_from_to[0] < 0.0:
      sm.logWarn("Bag start time of {0} s is smaller than 0".format(bag_from_to[0]))
    if bag_from_to[1] > baglength:
      sm.logWarn("Bag end time of {0} s is bigger than the total length of {1} s".format(
          bag_from_to[1], baglength))

    # find the valid timestamps
    valid_indices = []
    for idx, timestamp in enumerate(timestamps):
      if timestamp >= (bagstart + bag_from_to[0]) and timestamp <= (bagstart + bag_from_to[1]):
        valid_indices.append(idx)
    sm.logWarn(
        "BagImageDatasetReader: truncated {0} / {1} images.".format(len(indices) - len(valid_indices), len(indices)))
    return valid_indices
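
Both copies keep only the messages whose timestamp falls inside the window [bagstart + bag_from_to[0], bagstart + bag_from_to[1]]. A minimal, self-contained sketch of that windowing logic on plain floats:

# made-up message timestamps in seconds
timestamps = [100.0, 100.5, 101.0, 101.5, 102.0, 102.5]
bag_from_to = [0.4, 1.6]  # window relative to the first message

bagstart = min(timestamps)
valid_indices = [idx for idx, t in enumerate(timestamps)
                 if bagstart + bag_from_to[0] <= t <= bagstart + bag_from_to[1]]
print(valid_indices)  # [1, 2, 3]: 100.5, 101.0 and 101.5 fall inside the window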
Example #7
    def truncateIndicesFromFreq(self, indices, freq):

        # some value checking
        if freq < 0.0:
            raise RuntimeError("Frequency {0} Hz is smaller than 0".format(freq))

        # find the valid timestamps
        timestamp_last = -1
        valid_indices = []
        for idx in self.indices:
            topic, data, stamp = self.bag._read_message(
                self.index[idx].position)
            timestamp = data.header.stamp.secs + data.header.stamp.nsecs / 1.0e9
            if timestamp_last < 0.0:
                timestamp_last = timestamp
                valid_indices.append(idx)
                continue
            if (timestamp - timestamp_last) >= 1.0 / freq:
                timestamp_last = timestamp
                valid_indices.append(idx)
        sm.logWarn(
            "BagImageDatasetReader: truncated {0} / {1} images (frequency)".
            format(len(indices) - len(valid_indices), len(indices)))
        return valid_indices
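
The frequency variant keeps a message only when at least 1/freq seconds have elapsed since the last message kept, which downsamples the stream to at most freq Hz. A minimal sketch of that rate limiting on plain floats:

# made-up message timestamps in seconds
timestamps = [0.0, 0.04, 0.11, 0.19, 0.23, 0.31]
freq = 10.0  # keep at most one message every 1/freq = 0.1 s

timestamp_last = -1.0
valid_indices = []
for idx, t in enumerate(timestamps):
    if timestamp_last < 0.0 or (t - timestamp_last) >= 1.0 / freq:
        timestamp_last = t
        valid_indices.append(idx)
print(valid_indices)  # [0, 2, 4]: the stream is thinned to ~10 Hz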
Example #8
def figure_setup(scenes, plots=set(['scene', 'expectations_vs_demonstrations', 'feature_weights_progress', 'occupancy', \
                                    'feature_histogram', 'spline_parameter_distribution', 'spline_parameter_autocorrelation', \
                                    'feature_value_autocorrelation', 'parameter_trace', 'feature_expectation_trace'])):
    figuresPerScene = []
    axesPerScene = []
    dec = int(np.floor(np.log10(len(scenes)))) + 1
    for cnt, scene in enumerate(scenes):

        figuresThisScene = []

        axes = {}

        if 'scene' in plots:
            figuresThisScene.append(
                pl.figure(figsize=(20., 12.)))  # new figure per demonstration
            figuresThisScene[-1].canvas.set_window_title('scene_{0}'.format(
                str(cnt).zfill(dec)))
            ax = figuresThisScene[-1].add_subplot(111)
            ax.grid('on')
            ax.set_xlabel('x [m]')
            ax.set_ylabel('y [m]')
            grid = None
            for o in reversed(scene.getObservations()):
                grid = o.occupancyGrid
                if grid is not None: break
            if grid is not None:
                try:
                    from planner_algorithms import distanceTransform
                    dt = distanceTransform(grid)
                    pplot.plotGrid(dt,
                                   ax,
                                   cmap='Greys_r',
                                   interpolation='none',
                                   mask_value=np.NAN,
                                   rotate_args={
                                       'mode': 'constant',
                                       'order': 0,
                                       'cval': np.NAN
                                   })
                except ImportError:
                    sm.logWarn(
                        "Cannot import planner_algorithms, not showing distance transform"
                    )
            pplot.plotScene(scene, ax, show_radii=False, show_observations=True, show_observation_ellipses=False, show_gridmaps=True, colormap=cmAgents, alpha_obs_ellipses=0.05, traj_args={'linewidth': 2.0}, \
                            arrow_head_width=0.2, grid_args={'plot_border': True, 'plot_origin': True}, grid_rotate_args={'mode': 'constant', 'order': 0, 'cval': pp.OccupancyValue.FREE})
            ax.legend(loc='best', numpoints=1, fancybox=True, framealpha=0.5)
            ax.set_aspect('equal')
            axes['scene'] = ax

        if 'expectations_vs_demonstrations' in plots:
            figuresThisScene.append(
                pl.figure(figsize=(20., 12.)))  # new figure per demonstration
            figuresThisScene[-1].canvas.set_window_title(
                'expectations_vs_demonstrations_scene{0}'.format(
                    str(cnt).zfill(dec)))
            ax = figuresThisScene[-1].add_subplot(111)  # Expectations
            ax.grid('on')
            ax.set_ylabel('feature value')
            axes['expectations_vs_demonstrations'] = ax

        if 'feature_weights_progress' in plots and cnt == 0:  # Note: This will be the same for all plots
            figuresThisScene.append(
                pl.figure(figsize=(20., 12.)))  # new figure per demonstration
            figuresThisScene[-1].canvas.set_window_title(
                'feature_weights_progress')
            ax = figuresThisScene[-1].add_subplot(111)  # Gradient norm
            ax.grid('on', which='both', axis='y')
            ax.grid('on', which='major', axis='x')
            ax.set_xlabel('iteration')
            ax.set_ylabel('gradient norm')
            ax.set_yscale('log')
            ax.get_xaxis().set_major_locator(pl.MaxNLocator(integer=True))
            ax.tick_params(axis='y', which='both')
            ax.yaxis.set_major_formatter(pl.FormatStrFormatter("%.1f"))
            ax.yaxis.set_minor_formatter(pl.FormatStrFormatter("%.1f"))
            ax2 = ax.twinx()
            ax2.set_ylabel('feature weight')
            ax2.grid('on', which='major', axis='y')
            axes['feature_weights_progress'] = [ax, ax2]

        if 'occupancy' in plots:
            figuresThisScene.append(pl.figure(figsize=(20., 12.)))
            figuresThisScene[-1].canvas.set_window_title('occupancy')
            ax = figuresThisScene[-1].add_subplot(111)
            ax.grid('on')
            ax.set_xlabel('x')
            ax.set_ylabel('y')
            axes['occupancy'] = ax

        if 'feature_histogram' in plots:
            figuresThisScene.append(pl.figure(figsize=(20., 12.)))
            figuresThisScene[-1].canvas.set_window_title(
                'feature_histogram_scene{0}'.format(str(cnt).zfill(dec)))
            ax = figuresThisScene[-1].add_subplot(111)
            ax.grid('on')
            ax.set_xlabel('feature value')
            ax.set_ylabel('probability')
            axes['feature_histogram'] = ax

        if 'spline_parameter_distribution' in plots:
            nAxes = scene.numActiveSplineParameters
            fig1, axs = pplot.squareTileAxesLayout(nAxes, figsize=(20., 12.))
            figuresThisScene.append(fig1)
            figuresThisScene[-1].canvas.set_window_title(
                'spline_parameter_distribution_scene{0}'.format(
                    str(cnt).zfill(dec)))
            axs = putil.flatten(axs)
            axes['spline_parameter_distribution'] = axs
            figuresThisScene[-1].tight_layout()

        if 'spline_parameter_autocorrelation' in plots:
            nAxes = scene.numActiveSplineParameters
            fig2, axs = pplot.squareTileAxesLayout(nAxes, figsize=(20., 12.))
            figuresThisScene.append(fig2)
            axs = putil.flatten(axs)
            for ax in axs:
                ax.grid('on')
            figuresThisScene[-1].canvas.set_window_title(
                'spline_parameter_autocorrelation_scene{0}'.format(
                    str(cnt).zfill(dec)))
            axes['spline_parameter_autocorrelation'] = axs
            figuresThisScene[-1].tight_layout()

        if 'feature_value_autocorrelation' in plots:
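            # NOTE: featureContainer is not defined inside this function; it is
            # presumably bound in the enclosing module scope.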
            nAxes = len(featureContainer.getContainer())
            fig3, axs = pplot.squareTileAxesLayout(nAxes, figsize=(20., 12.))
            figuresThisScene.append(fig3)
            axs = putil.flatten(axs)
            for ax in axs:
                ax.grid('on')
            figuresThisScene[-1].canvas.set_window_title(
                'feature_value_autocorrelation_scene{0}'.format(
                    str(cnt).zfill(dec)))
            axes['feature_value_autocorrelation'] = axs
            figuresThisScene[-1].tight_layout()

        if 'parameter_trace' in plots:
            nAxes = scene.numActiveSplineParameters
            fig1, axs = pplot.squareTileAxesLayout(nAxes, figsize=(20., 12.))
            figuresThisScene.append(fig1)
            figuresThisScene[-1].canvas.set_window_title(
                'parameter_trace_scene{0}'.format(str(cnt).zfill(dec)))
            axs = putil.flatten(axs)
            axes['parameter_trace'] = axs
            figuresThisScene[-1].tight_layout()

        if 'feature_expectation_trace' in plots:
            nAxes = len(featureContainer.getContainer())
            fig1, axs = pplot.squareTileAxesLayout(nAxes, figsize=(20., 12.))
            figuresThisScene.append(fig1)
            figuresThisScene[-1].canvas.set_window_title(
                'feature_expectation_trace_scene{0}'.format(
                    str(cnt).zfill(dec)))
            axs = putil.flatten(axs)
            for ax in axs:
                ax.grid('on')
            axes['feature_expectation_trace'] = axs
            figuresThisScene[-1].tight_layout()

        axesPerScene.append(axes)
        figuresPerScene.append(figuresThisScene)

    return figuresPerScene, axesPerScene
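
figure_setup opens one window per requested plot kind for every scene and returns parallel per-scene figure and axes containers. Here is a minimal sketch of the same pattern with stock matplotlib; note that newer matplotlib releases moved set_window_title from the canvas onto fig.canvas.manager, which the sketch uses.

import matplotlib.pyplot as pl

def simple_figure_setup(n_scenes, plots=('scene', 'feature_histogram')):
    """Create one gridded figure per requested plot kind for every scene."""
    figures_per_scene, axes_per_scene = [], []
    for cnt in range(n_scenes):
        figures, axes = [], {}
        for name in plots:
            fig = pl.figure(figsize=(8.0, 6.0))
            # newer matplotlib: the window title lives on the figure manager
            fig.canvas.manager.set_window_title('{0}_scene{1}'.format(name, cnt))
            ax = fig.add_subplot(111)
            ax.grid(True)
            figures.append(fig)
            axes[name] = ax
        figures_per_scene.append(figures)
        axes_per_scene.append(axes)
    return figures_per_scene, axes_per_scene

figs, axes = simple_figure_setup(2)
print(len(figs), sorted(axes[0].keys()))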
Example #9
    def getInitialGuesses(self, cameras):

        if not self.G:
            raise RuntimeError("Graph is uninitialized!")

        #################################################################
        ## STEP 0: check if all cameras in the chain are connected
        ##         through common target point observations
        ##         (=all vertices connected?)
        #################################################################
        if not self.isGraphConnected():
            sm.logError(
                "The cameras are not connected through mutual target observations! "
                "Please provide another dataset...")

            self.plotGraph()
            sys.exit(0)

        #################################################################
        ## STEP 1: get baseline initial guesses by calibrating good
        ##         camera pairs using a stereo calibration
        ##
        #################################################################

        #first we need to find the best camera pairs to obtain the initial guesses
        #--> use the pairs that share the most common observed target corners
        #The graph is built with weighted edges that represent the number of common
        #target corners, so we can use Dijkstra's algorithm to get the best pair
        #configuration for the initial pair calibrations
        weights = [1.0 / commonPoints for commonPoints in self.G.es["weight"]]

        #choose the cam with the least edges as base_cam
        outdegrees = self.G.vs.outdegree()
        base_cam_id = outdegrees.index(min(outdegrees))
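        # NOTE: base_cam_id is computed here, but the shortest-path search below
        # starts from vertex 0, so the value is currently unused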

        #solve for shortest path  (=optimal transformation chaining)
        edges_on_path = self.G.get_shortest_paths(0,
                                                  weights=weights,
                                                  output="epath")

        self.optimal_baseline_edges = set(
            [item for sublist in edges_on_path for item in sublist])

        #################################################################
        ## STEP 2: solve stereo calibration problem for the baselines
        ##         (baselines are always from lower_id to higher_id cams!)
        #################################################################

        #calibrate all cameras in pairs
        for baseline_edge_id in self.optimal_baseline_edges:

            #get the cam_nrs from the graph edge (calibrate from low to high id)
            vertices = self.G.es[baseline_edge_id].tuple
            if vertices[0] < vertices[1]:
                camL_nr = vertices[0]
                camH_nr = vertices[1]
            else:
                camL_nr = vertices[1]
                camH_nr = vertices[0]

            print("\t initializing camera pair ({0},{1})...  ".format(
                camL_nr, camH_nr))

            #run the pair extrinsic calibration
            obs_list = self.obs_db.getAllObsTwoCams(camL_nr, camH_nr)
            success, baseline_HL = kcc.stereoCalibrate(cameras[camL_nr],
                                                       cameras[camH_nr],
                                                       obs_list,
                                                       distortionActive=False)

            if success:
                sm.logDebug("baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))
            else:
                sm.logError(
                    "initialization of camera pair ({0},{1}) failed  ".format(
                        camL_nr, camH_nr))
                sm.logError("estimated baseline_{0}_{1}={2}".format(
                    camL_nr, camH_nr, baseline_HL.T()))

            #store the baseline in the graph
            self.G.es[self.G.get_eid(camL_nr,
                                     camH_nr)]["baseline_HL"] = baseline_HL

        #################################################################
        ## STEP 3: transform from the "optimal" baseline chain to camera chain ordering
        ##         (=> baseline_0 = T_c1_c0 |
        #################################################################

        #construct the optimal path graph
        G_optimal_baselines = self.G.copy()

        eid_not_optimal_path = set(range(0, len(G_optimal_baselines.es)))
        for eid in self.optimal_baseline_edges:
            eid_not_optimal_path.remove(eid)
        G_optimal_baselines.delete_edges(eid_not_optimal_path)

        #now we convert the arbitrary baseline graph to baselines starting from
        # cam0 and traverse the chain (cam0->cam1->cam2->camN)
        baselines = []
        for baseline_id in range(0, self.numCams - 1):
            #find the shortest path on the graph
            path = G_optimal_baselines.get_shortest_paths(
                baseline_id, baseline_id + 1)[0]

            #get the baseline from cam with id baseline_id to baseline_id+1
            baseline_HL = sm.Transformation()
            for path_idx in range(0, len(path) - 1):
                source_vert = path[path_idx]
                target_vert = path[path_idx + 1]
                T_edge = self.G.es[self.G.get_eid(source_vert,
                                                  target_vert)]["baseline_HL"]

                #correct the direction (baselines always from low to high cam id!)
                T_edge = T_edge if source_vert < target_vert else T_edge.inverse(
                )

                #chain up
                baseline_HL = T_edge * baseline_HL

            #store in graph
            baselines.append(baseline_HL)

        #################################################################
        ## STEP 4: refine guess in full batch
        #################################################################
        success, baselines = kcc.solveFullBatch(cameras, baselines, self)

        if not success:
            sm.logWarn("Full batch refinement failed!")

        return baselines
Example #10
    def getInitialGuesses(self, cameras):
        
        if not self.G:
            raise RuntimeError("Graph is uninitialized!")
        
        #################################################################
        ## STEP 0: check if all cameras in the chain are connected
        ##         through common target point observations
        ##         (=all vertices connected?)
        #################################################################
        if not self.isGraphConnected():
            sm.logError("The cameras are not connected through mutual target observations! " 
                        "Please provide another dataset...")
            
            self.plotGraph()
            sys.exit(0)
        
        #################################################################
        ## STEP 1: get baseline initial guesses by calibrating good 
        ##         camera pairs using a stereo calibration
        ## 
        #################################################################

        #first we need to find the best camera pairs to obtain the initial guesses
        #--> use the pairs that share the most common observed target corners
        #The graph is built with weighted edges that represent the number of common
        #target corners, so we can use Dijkstra's algorithm to get the best pair
        #configuration for the initial pair calibrations
        weights = [1.0/commonPoints for commonPoints in self.G.es["weight"]]

        #choose the cam with the least edges as base_cam
        outdegrees = self.G.vs.outdegree()
        base_cam_id = outdegrees.index(min(outdegrees))

        #solve for shortest path  (=optimal transformation chaining)
        edges_on_path = self.G.get_shortest_paths(0, weights=weights, output="epath")
        
        self.optimal_baseline_edges = set([item for sublist in edges_on_path for item in sublist])
        
        
        #################################################################
        ## STEP 2: solve stereo calibration problem for the baselines
        ##         (baselines are always from lower_id to higher_id cams!)
        #################################################################
        
        #calibrate all cameras in pairs
        for baseline_edge_id in self.optimal_baseline_edges:

            #get the cam_nrs from the graph edge (calibrate from low to high id)
            vertices = self.G.es[baseline_edge_id].tuple
            if vertices[0]<vertices[1]:
                camL_nr = vertices[0]
                camH_nr = vertices[1]
            else:
                camL_nr = vertices[1]
                camH_nr = vertices[0]
            
            print("\t initializing camera pair ({0},{1})...  ".format(camL_nr, camH_nr))

            #run the pair extrinsic calibration
            obs_list = self.obs_db.getAllObsTwoCams(camL_nr, camH_nr)
            success, baseline_HL = kcc.stereoCalibrate(cameras[camL_nr], 
                                                       cameras[camH_nr], 
                                                       obs_list,
                                                       distortionActive=False)
            
            if success:
                sm.logDebug("baseline_{0}_{1}={2}".format(camL_nr, camH_nr, baseline_HL.T()))
            else:
                sm.logError("initialization of camera pair ({0},{1}) failed  ".format(camL_nr, camH_nr))
                sm.logError("estimated baseline_{0}_{1}={2}".format(camL_nr, camH_nr, baseline_HL.T()))
        
            #store the baseline in the graph
            self.G.es[ self.G.get_eid(camL_nr, camH_nr) ]["baseline_HL"] = baseline_HL
        
        #################################################################
        ## STEP 3: transform from the "optimal" baseline chain to camera chain ordering
        ##         (=> baseline_0 = T_c1_c0 | 
        #################################################################
        
        #construct the optimal path graph
        G_optimal_baselines = self.G.copy()
        
        eid_not_optimal_path = set(range(0,len(G_optimal_baselines.es)))
        for eid in self.optimal_baseline_edges:
            eid_not_optimal_path.remove(eid)
        G_optimal_baselines.delete_edges( eid_not_optimal_path )
        
        #now we convert the arbitrary baseline graph to baselines starting from
        # cam0 and traverse the chain (cam0->cam1->cam2->camN)
        baselines = []
        for baseline_id in range(0, self.numCams-1):
            #find the shortest path on the graph
            path = G_optimal_baselines.get_shortest_paths(baseline_id, baseline_id+1)[0]
            
            #get the baseline from cam with id baseline_id to baseline_id+1
            baseline_HL = sm.Transformation()
            for path_idx in range(0, len(path)-1):
                source_vert = path[path_idx]
                target_vert = path[path_idx+1]
                T_edge = self.G.es[ self.G.get_eid(source_vert, target_vert) ]["baseline_HL"]
            
                #correct the direction (baselines always from low to high cam id!)
                T_edge = T_edge if source_vert<target_vert else T_edge.inverse()
            
                #chain up
                baseline_HL = T_edge * baseline_HL
            
            #store in graph
            baselines.append(baseline_HL)
 
        #################################################################
        ## STEP 4: refine guess in full batch
        #################################################################
        success, baselines = kcc.solveFullBatch(cameras, baselines, self)
        
        if not success:
            sm.logWarn("Full batch refinement failed!")
    
        return baselines
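
STEP 1 above inverts the edge weights so that camera pairs sharing many target corners become cheap edges, then runs Dijkstra through igraph's get_shortest_paths. A self-contained sketch of that weighting on a hypothetical 4-camera overlap graph (requires the python-igraph package):

import igraph

# hypothetical overlap graph: edge weights = number of shared target corners
g = igraph.Graph()
g.add_vertices(4)
g.add_edges([(0, 1), (1, 2), (2, 3), (0, 3)])
g.es["weight"] = [120, 80, 100, 5]  # cam0 and cam3 barely overlap

# invert the weights so strongly overlapping pairs become cheap edges
weights = [1.0 / w for w in g.es["weight"]]

# Dijkstra from cam0 to every camera, returning the edge ids along each path
edges_on_path = g.get_shortest_paths(0, weights=weights, output="epath")
optimal_edges = set(e for path in edges_on_path for e in path)
print(optimal_edges)  # {0, 1, 2}: the weak cam0-cam3 edge is avoided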
Example #11
    def __buildOptimizationProblem(self, W):
        """Build the optimisation problem"""
        problem = inc.CalibrationOptimizationProblem()

        # Initialize all design variables.
        self.__initPoseDesignVariables(problem)

        #####
        ## build error terms and add to problem

        # store all frames
        self.__frames = []
        self.__reprojection_errors = []

        # This code assumes that the order of the landmarks in the observations
        # is invariant across all observations. At least for the chessboards it is true.

        #####
        # add all the landmarks once
        landmarks = []
        landmarks_expr = {}
        keypoint_ids0 = self.__observations[0].getCornersIdx()
        for idx, landmark in enumerate(self.__observations[0].getCornersTargetFrame()):
            # design variable for landmark
            landmark_w_dv = aopt.HomogeneousPointDv(sm.toHomogeneous(landmark))
            landmark_w_dv.setActive(self.__config.estimateParameters['landmarks'])
            landmarks.append(landmark_w_dv)
            landmarks_expr[keypoint_ids0[idx]] = landmark_w_dv.toExpression()
            problem.addDesignVariable(landmark_w_dv, CALIBRATION_GROUP_ID)

        #####
        # activate design variables
        self.__camera_dv.setActive(
            self.__config.estimateParameters['intrinsics'],
            self.__config.estimateParameters['distortion'],
            self.__config.estimateParameters['shutter']
        )

        #####
        # Add design variables

        # add the camera design variables last for optimal sparsity patterns
        problem.addDesignVariable(self.__camera_dv.shutterDesignVariable(), CALIBRATION_GROUP_ID)
        problem.addDesignVariable(self.__camera_dv.projectionDesignVariable(), CALIBRATION_GROUP_ID)
        problem.addDesignVariable(self.__camera_dv.distortionDesignVariable(), CALIBRATION_GROUP_ID)

        #####
        # Regularization term / motion prior
        motionError = asp.BSplineMotionError(self.__poseSpline_dv, W)
        problem.addErrorTerm(motionError)

        #####
        # add a reprojection error for every corner of each observation
        for frameid, observation in enumerate(self.__observations):
            # only process successful observations of a pattern
            if (observation.hasSuccessfulObservation()):
                # add a frame
                frame = self.__cameraModelFactory.frameType()
                frame.setGeometry(self.__camera)
                frame.setTime(observation.time())
                self.__frames.append(frame)

                #####
                # add an error term for every observed corner
                for index, point in enumerate(observation.getCornersImageFrame()):

                    # keypoint time offset by line delay as expression type
                    keypoint_time = self.__camera_dv.keypointTime(frame.time(), point)

                    # from target to world transformation.
                    T_w_t = self.__poseSpline_dv.transformationAtTime(
                        keypoint_time,
                        self.__config.timeOffsetConstantSparsityPattern,
                        self.__config.timeOffsetConstantSparsityPattern
                    )
                    T_t_w = T_w_t.inverse()

                    # we only have the first image's design variables
                    # so any landmark that is not in that frame won't be in the problem
                    # thus we must skip those measurements that are of a keypoint that isn't visible
                    keypoint_ids = observation.getCornersIdx()
                    if not np.any(keypoint_ids[index]==keypoint_ids0):
                       sm.logWarn("landmark {0} in frame {1} not in first frame".format(keypoint_ids[index],frameid))
                       continue

                    # transform target point to camera frame
                    p_t = T_t_w * landmarks_expr[keypoint_ids[index]]

                    # create the keypoint
                    keypoint_index = frame.numKeypoints()
                    keypoint = acv.Keypoint2()
                    keypoint.setMeasurement(point)
                    inverseFeatureCovariance = self.__config.inverseFeatureCovariance
                    keypoint.setInverseMeasurementCovariance(np.eye(len(point)) * inverseFeatureCovariance)
                    keypoint.setLandmarkId(keypoint_index)
                    frame.addKeypoint(keypoint)

                    # create reprojection error
                    reprojection_error = self.__buildErrorTerm(
                        frame,
                        keypoint_index,
                        p_t,
                        self.__camera_dv,
                        self.__poseSpline_dv
                    )
                    self.__reprojection_errors.append(reprojection_error)
                    problem.addErrorTerm(reprojection_error)

        return problem
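
Every error term added above compares a measured image keypoint against the projection of a target landmark transformed into the camera frame. A minimal numpy sketch of such a reprojection residual, assuming a plain pinhole model rather than the rolling-shutter camera and design-variable machinery used here; all numbers are made up.

import numpy as np

def reprojection_residual(p_t, T_c_t, K, measurement):
    # transform the target-frame landmark into the camera frame
    p_c = T_c_t[:3, :3] @ p_t + T_c_t[:3, 3]
    # pinhole projection onto the image plane
    uv = (K @ (p_c / p_c[2]))[:2]
    return measurement - uv

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])
T_c_t = np.eye(4)
T_c_t[:3, 3] = [0.0, 0.0, 2.0]         # target 2 m in front of the camera
landmark = np.array([0.1, 0.05, 0.0])  # a corner on the planar target
measured = np.array([345.5, 252.0])    # a hypothetical detected corner

print(reprojection_residual(landmark, T_c_t, K, measured))  # [0.5, -0.5]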