Example #1
    def export(self,
               reconstruction,
               graph,
               data,
               image_extension='jpg',
               scale_focal=1.0):
        lines = ['NVM_V3', '', str(len(reconstruction.shots))]
        for shot in reconstruction.shots.values():
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()
            words = [
                self.image_path(shot.id, data, image_extension),
                shot.camera.focal *
                max(shot.camera.width, shot.camera.height) * scale_focal,
                q[0],
                q[1],
                q[2],
                q[3],
                o[0],
                o[1],
                o[2],
                '0',
                '0',
            ]
            lines.append(' '.join(map(str, words)))
        lines += ['0', '', '0', '', '0']

        with io.open_wt(data.data_path + '/reconstruction.nvm') as fout:
            fout.write('\n'.join(lines))
Example #2
    def export(self, reconstruction, graph, data):
        lines = ['NVM_V3', '', str(len(reconstruction.shots))]
        for shot in reconstruction.shots.values():
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()
            words = [
                self.image_path(shot.id, data),
                shot.camera.focal * max(shot.camera.width, shot.camera.height),
                q[0], q[1], q[2], q[3],
                o[0], o[1], o[2],
                '0', '0',
            ]
            lines.append(' '.join(map(str, words)))
        lines += ['0', '', '0', '', '0']

        with io.open_wt(data.data_path + '/reconstruction.nvm') as fout:
            fout.write('\n'.join(lines))
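
Both variants above (and the ones that follow) emit the same camera record. As a reference, here is a minimal sketch of that layout; the helper name nvm_camera_line and its plain-tuple inputs are illustrative, not part of the original code. The fields follow the NVM_V3 camera line: image path, focal length in pixels, rotation as a WXYZ quaternion, camera center, radial distortion, and a terminating zero.

def nvm_camera_line(image_path, focal_pixels, q_wxyz, origin):
    # NVM_V3 camera record:
    # <image> <focal> <qw qx qy qz> <cx cy cz> <radial distortion> <0>
    fields = [image_path, focal_pixels] + list(q_wxyz) + list(origin) + [0, 0]
    return ' '.join(map(str, fields))

# Example with made-up values, purely for illustration:
print(nvm_camera_line('undistorted/img_0001.jpg', 1234.5,
                      (1.0, 0.0, 0.0, 0.0), (0.0, 0.0, 0.0)))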
Example #3
    def export(self, reconstruction, graph, data):
        lines = ['NVM_V3', '', str(len(reconstruction.shots))]
        for shot in reconstruction.shots.values():
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()
            words = [
                'undistorted/' + shot.id,
                shot.camera.focal * max(shot.camera.width, shot.camera.height),
                q[0],
                q[1],
                q[2],
                q[3],
                o[0],
                o[1],
                o[2],
                '0',
                '0',
            ]
            lines.append(' '.join(map(str, words)))
        lines += ['0', '', '0', '', '0']

        with open(data.data_path + '/reconstruction.nvm', 'w') as fout:
            fout.write('\n'.join(lines))
Example #4
    def export(self, reconstruction, graph, data):
        lines = ['NVM_V3', '', str(len(reconstruction.shots))]
        for shot in reconstruction.shots.values():
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()
            size = max(data.undistorted_image_size(shot.id))
            words = [
                self.image_path(shot.id, data),
                shot.camera.focal * size,
                q[0],
                q[1],
                q[2],
                q[3],
                o[0],
                o[1],
                o[2],
                '0',
                '0',
            ]
            lines.append(' '.join(map(str, words)))
        lines += ['0', '', '0', '', '0']

        with io.open_wt(data.data_path + '/reconstruction.nvm') as fout:
            fout.write('\n'.join(lines))
Example #5
    def export(self, reconstruction, tracks_manager, udata, with_points,
               export_only):
        lines = ['NVM_V3', '', len(reconstruction.shots)]
        shot_size_cache = {}
        shot_index = {}
        i = 0
        skipped_shots = 0

        for shot in reconstruction.shots.values():
            if export_only is not None and shot.id not in export_only:
                skipped_shots += 1
                continue

            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()

            shot_size_cache[shot.id] = udata.undistorted_image_size(shot.id)
            shot_index[shot.id] = i
            i += 1
            if shot.camera.projection_type == "brown":
                # Will approximate Brown model, not optimal
                focal_normalized = (shot.camera.focal_x +
                                    shot.camera.focal_y) / 2.0
            else:
                focal_normalized = shot.camera.focal

            words = [
                self.image_path(shot.id, udata),
                focal_normalized * max(shot_size_cache[shot.id]),
                q[0],
                q[1],
                q[2],
                q[3],
                o[0],
                o[1],
                o[2],
                '0',
                '0',
            ]
            lines.append(' '.join(map(str, words)))

        # Adjust shots count
        lines[2] = str(lines[2] - skipped_shots)

        if with_points:
            skipped_points = 0
            lines.append('')
            points = reconstruction.points
            lines.append(len(points))
            points_count_index = len(lines) - 1

            for point_id, point in iteritems(points):
                shots = reconstruction.shots
                coord = point.coordinates
                color = list(map(int, point.color))

                view_line = []
                for shot_key, obs in tracks_manager.get_track_observations(
                        point_id).items():
                    if export_only is not None and shot_key not in export_only:
                        continue

                    if shot_key in shots.keys():
                        v = obs.point
                        x = (0.5 + v[0]) * shot_size_cache[shot_key][1]
                        y = (0.5 + v[1]) * shot_size_cache[shot_key][0]
                        view_line.append(' '.join(
                            map(str, [shot_index[shot_key], obs.id, x, y])))

                if len(view_line) > 1:
                    lines.append(' '.join(map(str, coord)) + ' ' +
                                 ' '.join(map(str, color)) + ' ' +
                                 str(len(view_line)) + ' ' +
                                 ' '.join(view_line))
                else:
                    skipped_points += 1

            # Adjust points count
            lines[points_count_index] = str(lines[points_count_index] -
                                            skipped_points)
        else:
            lines += ['0', '']

        lines += ['0', '', '0']

        with io.open_wt(udata.data_path + '/reconstruction.nvm') as fout:
            fout.write('\n'.join(lines))
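
The variant above has to patch the shot and point counts after the loops (lines[2] and lines[points_count_index]) because entries can be skipped while iterating. A minimal sketch of the equivalent collect-then-count structure for the camera section, assuming the same shot objects and a hypothetical camera_line() helper that formats one NVM camera record:

def build_camera_section(shots, camera_line, export_only=None):
    # Collect the camera lines first; the count is then simply len(camera_lines),
    # so no post-hoc adjustment of the header line is needed.
    camera_lines = [camera_line(shot) for shot in shots.values()
                    if export_only is None or shot.id in export_only]
    return ['NVM_V3', '', str(len(camera_lines))] + camera_lines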
Example #6
def export_nvm(image_list, reconstructions, track_graph, nvm_file_path):
    """
    Generate a reconstruction file that is consistent with Bundler's format
    """

    # write one NVM file per reconstruction
    for j, reconstruction in enumerate(reconstructions):

        # setup
        lines = ['NVM_V3', '']
        points = reconstruction.points
        shots = reconstruction.shots
        shots_order = {key: i for i, key in enumerate(shots)}

        # number of shots, then one camera line per shot
        lines.append(str(len(shots_order)))
        for shot_id in shots:
            
            # get the data needed for each shot
            shot = shots[shot_id]
            focal = shot.camera.focal * max(shot.camera.width, shot.camera.height)
            o = shot.pose.get_origin()
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())

            # camera line: <image> <focal> <qw qx qy qz> <cx cy cz> <radial distortion> <0>
            words = [os.path.join(nvm_file_path, 'undistorted', shot_id), focal,
                     q[0], q[1], q[2], q[3], o[0], o[1], o[2], 0, 0]
            lines.append(' '.join(map(str, words)))
        lines.append('')

        # number of points, then one point line per point
        lines.append(str(len(points)))
        for point_id, point in points.items():

            # xyz and rgb
            coord = point.coordinates
            color = list(map(int, point.color))
            line = str(coord[0]) + ' ' + str(coord[1]) + ' ' + str(coord[2]) + ' '
            line += str(color[0]) + ' ' + str(color[1]) + ' ' + str(color[2]) + ' '

            # count the number of views that saw this point
            view_list = track_graph[point_id]
            view_line = []
            nobs = 0
            for shot_key, view in view_list.items():
                if shot_key in shots:
                    nobs += 1

            # skip points seen by fewer than 2 or by more than all images (for MVE's benefit)
            if nobs < 2 or nobs > len(shots):
                continue

            # for each observation
            for shot_key, view in view_list.items():

                # keep only observations made by shots in this reconstruction
                if shot_key in shots:

                    # get u,v for observation
                    v = view['feature']
                    camera = shots[shot_key].camera
                    scale = max(camera.width, camera.height)
                    x = v[0] * scale
                    y = -v[1] * scale

                    # index of the shot in the NVM camera list
                    shot_index = shots_order[shot_key]

                    # append
                    view_line.append(' '.join(map(str, [shot_index, view['feature_id'], x, y])))

            #append line
            line += str(len(view_line)) + ' ' + ' '.join(view_line)
            lines.append(line)
        
        # end model
        lines.append('0')

        # write the NVM
        with open(nvm_file_path + '/reconstruction' + str(j) + '.nvm', 'w') as fout:
            fout.write('\n'.join(lines))
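
A hedged usage sketch for export_nvm, assuming an older OpenSfM-style DataSet; the loader names below (images, load_reconstruction, load_tracks_graph) vary between OpenSfM versions and should be treated as placeholders:

from opensfm import dataset

data = dataset.DataSet('path/to/project')      # placeholder project path
reconstructions = data.load_reconstruction()   # list of Reconstruction objects
track_graph = data.load_tracks_graph()         # assumed loader name; may differ by version
export_nvm(data.images(), reconstructions, track_graph, data.data_path)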
Example #7
    def export(self, reconstruction, graph, data, with_points):
        lines = ['NVM_V3', '', str(len(reconstruction.shots))]
        shot_size_cache = {}
        shot_index = {}
        i = 0

        for shot in reconstruction.shots.values():
            q = tf.quaternion_from_matrix(shot.pose.get_rotation_matrix())
            o = shot.pose.get_origin()

            shot_size_cache[shot.id] = data.undistorted_image_size(shot.id)
            shot_index[shot.id] = i
            i += 1

            if type(shot.camera) == types.BrownPerspectiveCamera:
                # Will approximate Brown model, not optimal
                focal_normalized = shot.camera.focal_x
            else:
                focal_normalized = shot.camera.focal

            words = [
                self.image_path(shot.id, data),
                focal_normalized * max(shot_size_cache[shot.id]),
                q[0],
                q[1],
                q[2],
                q[3],
                o[0],
                o[1],
                o[2],
                '0',
                '0',
            ]
            lines.append(' '.join(map(str, words)))

        if with_points:
            lines.append('')
            points = reconstruction.points
            lines.append(str(len(points)))

            for point_id, point in iteritems(points):
                shots = reconstruction.shots
                coord = point.coordinates
                color = list(map(int, point.color))

                view_list = graph[point_id]
                view_line = []

                for shot_key, view in iteritems(view_list):
                    if shot_key in shots.keys():
                        v = view['feature']
                        x = (0.5 + v[0]) * shot_size_cache[shot_key][1]
                        y = (0.5 + v[1]) * shot_size_cache[shot_key][0]
                        view_line.append(' '.join(
                            map(str, [
                                shot_index[shot_key], view['feature_id'], x, y
                            ])))

                lines.append(' '.join(map(str, coord)) + ' ' +
                             ' '.join(map(str, color)) + ' ' +
                             str(len(view_line)) + ' ' + ' '.join(view_line))
        else:
            lines += ['0', '']

        lines += ['0', '', '0']

        with io.open_wt(data.data_path + '/reconstruction.nvm') as fout:
            fout.write('\n'.join(lines))
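
As a quick sanity check on the files written by any of these exporters, here is a minimal sketch of a reader for the camera section of a single-model NVM_V3 file; the function name and returned dictionary layout are illustrative, not an existing API:

def read_nvm_cameras(path):
    # Parse only the camera section of a single-model NVM_V3 file.
    with open(path) as fin:
        lines = [line.strip() for line in fin if line.strip()]
    assert lines[0].startswith('NVM_V3')
    num_cameras = int(lines[1])
    cameras = []
    for line in lines[2:2 + num_cameras]:
        words = line.split()
        cameras.append({
            'image': words[0],
            'focal': float(words[1]),
            'q_wxyz': [float(w) for w in words[2:6]],
            'origin': [float(w) for w in words[6:9]],
        })
    return cameras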