Example 1
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        max_dim = find_largest_photo_dim(reconstruction.photos)
        max_texture_size = 8 * 1024 # default

        if max_dim > 8000:
            log.ODM_INFO("Large input images (%s pixels), increasing maximum texture size." % max_dim)
            max_texture_size *= 3

        class nonloc:
            runs = []

        def add_run(nvm_file, primary=True, band=None):
            subdir = ""
            if not primary and band is not None:
                subdir = band
            
            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_texturing, subdir),
                    'model': tree.odm_mesh,
                    'nadir': False,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_geo_labeling.vec") if subdir else None
                }]

            if not args.use_3dmesh:
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                    'model': tree.odm_25dmesh,
                    'nadir': True,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_geo_labeling.vec") if subdir else None
                }]

        if reconstruction.multi_camera:

            for band in reconstruction.multi_camera:
                primary = band['name'] == get_primary_band_name(reconstruction.multi_camera, args.primary_band)
                nvm_file = os.path.join(tree.opensfm, "undistorted", "reconstruction_%s.nvm" % band['name'].lower())
                add_run(nvm_file, primary, band['name'].lower())
            
            # Sort to make sure primary band is processed first
            nonloc.runs.sort(key=lambda r: r['primary'], reverse=True)
        else:
            add_run(tree.opensfm_reconstruction_nvm)
        
        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['out_dir']):
                system.mkdir_p(r['out_dir'])

            odm_textured_model_obj = os.path.join(r['out_dir'], tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or self.rerun():
                log.ODM_INFO('Writing MVS Textured file in: %s'
                              % odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                keepUnseenFaces = ""
                nadir = ""

                if args.texturing_skip_global_seam_leveling:
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if args.texturing_skip_local_seam_leveling:
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if args.texturing_keep_unseen_faces:
                    keepUnseenFaces = "--keep_unseen_faces"
                if (r['nadir']):
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': os.path.join(r['out_dir'], "odm_textured_model_geo"),
                    'model': r['model'],
                    'dataTerm': args.texturing_data_term,
                    'outlierRemovalType': args.texturing_outlier_removal_type,
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'keepUnseenFaces': keepUnseenFaces,
                    'toneMapping': args.texturing_tone_mapping,
                    'nadirMode': nadir,
                    'maxTextureSize': '--max_texture_size=%s' % max_texture_size,
                    'nvm_file': r['nvm_file'],
                    'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
                    'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
                }

                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

                # Make sure tmp directory is empty
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO("Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('"{bin}" "{nvm_file}" "{model}" "{out_dir}" '
                        '-d {dataTerm} -o {outlierRemovalType} '
                        '-t {toneMapping} '
                        '{intermediate} '
                        '{skipGlobalSeamLeveling} '
                        '{skipLocalSeamLeveling} '
                        '{keepUnseenFaces} '
                        '{nadirMode} '
                        '{labelingFile} '
                        '{maxTextureSize} '.format(**kwargs))
                
                # Backward compatibility: copy odm_textured_model_geo.mtl to odm_textured_model.mtl
                # for certain older WebODM clients which expect an odm_textured_model.mtl
                # to be present for visualization.
                # We should remove this at some point in the future.
                geo_mtl = os.path.join(r['out_dir'], 'odm_textured_model_geo.mtl')
                if io.file_exists(geo_mtl):
                    nongeo_mtl = os.path.join(r['out_dir'], 'odm_textured_model.mtl')
                    shutil.copy(geo_mtl, nongeo_mtl)
                
                progress += progress_per_run
                self.update_progress(progress)
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s'
                                % odm_textured_model_obj)
        
        if args.optimize_disk_space:
            for r in nonloc.runs:
                if io.file_exists(r['model']):
                    os.remove(r['model'])
            
            undistorted_images_path = os.path.join(tree.opensfm, "undistorted", "images")
            if io.dir_exists(undistorted_images_path):
                shutil.rmtree(undistorted_images_path)
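
The run above assembles the mvstex command line from a kwargs dict in which each optional switch is a plain string that stays empty when the option is disabled, so it simply drops out of the formatted command. Below is a minimal, standalone sketch of that pattern; build_texturing_flags and the SimpleNamespace args are illustrative only, not part of ODM.

from types import SimpleNamespace

def build_texturing_flags(args, nadir):
    # Each optional switch is either its literal flag or an empty string.
    flags = {
        'skipGlobalSeamLeveling': '--skip_global_seam_leveling' if args.texturing_skip_global_seam_leveling else '',
        'skipLocalSeamLeveling': '--skip_local_seam_leveling' if args.texturing_skip_local_seam_leveling else '',
        'keepUnseenFaces': '--keep_unseen_faces' if args.texturing_keep_unseen_faces else '',
        'nadirMode': '--nadir_mode' if nadir else '',
    }
    # Empty values leave only extra whitespace, which the shell ignores.
    return '{skipGlobalSeamLeveling} {skipLocalSeamLeveling} {keepUnseenFaces} {nadirMode}'.format(**flags).strip()

args = SimpleNamespace(texturing_skip_global_seam_leveling=True,
                       texturing_skip_local_seam_leveling=False,
                       texturing_keep_unseen_faces=True)
print(build_texturing_flags(args, nadir=True))
# --skip_global_seam_leveling  --keep_unseen_faces --nadir_mode
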
Example 2
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        class nonloc:
            runs = []

        def add_run(nvm_file, primary=True, band=None):
            subdir = ""
            if not primary and band is not None:
                subdir = band

            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_texturing, subdir),
                    'model': tree.odm_mesh,
                    'nadir': False,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_texturing, "odm_textured_model_labeling.vec") if subdir else None
                }]

            if not args.use_3dmesh:
                nonloc.runs += [{
                    'out_dir': os.path.join(tree.odm_25dtexturing, subdir),
                    'model': tree.odm_25dmesh,
                    'nadir': True,
                    'primary': primary,
                    'nvm_file': nvm_file,
                    'labeling_file': os.path.join(tree.odm_25dtexturing, "odm_textured_model_labeling.vec") if subdir else None
                }]

        if reconstruction.multi_camera:

            for band in reconstruction.multi_camera:
                primary = band['name'] == get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                nvm_file = os.path.join(
                    tree.opensfm, "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())
                add_run(nvm_file, primary, band['name'].lower())

            # Sort to make sure primary band is processed first
            nonloc.runs.sort(key=lambda r: r['primary'], reverse=True)
        else:
            add_run(tree.opensfm_reconstruction_nvm)

        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['out_dir']):
                system.mkdir_p(r['out_dir'])

            odm_textured_model_obj = os.path.join(r['out_dir'],
                                                  tree.odm_textured_model_obj)

            if not io.file_exists(odm_textured_model_obj) or self.rerun():
                log.ODM_INFO('Writing MVS Textured file in: %s' %
                             odm_textured_model_obj)

                # Format arguments to fit Mvs-Texturing app
                skipGlobalSeamLeveling = ""
                skipLocalSeamLeveling = ""
                nadir = ""

                if (self.params.get('skip_glob_seam_leveling')):
                    skipGlobalSeamLeveling = "--skip_global_seam_leveling"
                if (self.params.get('skip_loc_seam_leveling')):
                    skipLocalSeamLeveling = "--skip_local_seam_leveling"
                if (r['nadir']):
                    nadir = '--nadir_mode'

                # mvstex definitions
                kwargs = {
                    'bin': context.mvstex_path,
                    'out_dir': os.path.join(r['out_dir'], "odm_textured_model"),
                    'model': r['model'],
                    'dataTerm': self.params.get('data_term'),
                    'outlierRemovalType': self.params.get('outlier_rem_type'),
                    'skipGlobalSeamLeveling': skipGlobalSeamLeveling,
                    'skipLocalSeamLeveling': skipLocalSeamLeveling,
                    'toneMapping': self.params.get('tone_mapping'),
                    'nadirMode': nadir,
                    'nvm_file': r['nvm_file'],
                    'intermediate': '--no_intermediate_results' if (r['labeling_file'] or not reconstruction.multi_camera) else '',
                    'labelingFile': '-L "%s"' % r['labeling_file'] if r['labeling_file'] else ''
                }

                mvs_tmp_dir = os.path.join(r['out_dir'], 'tmp')

                # Make sure tmp directory is empty
                if io.dir_exists(mvs_tmp_dir):
                    log.ODM_INFO(
                        "Removing old tmp directory {}".format(mvs_tmp_dir))
                    shutil.rmtree(mvs_tmp_dir)

                # run texturing binary
                system.run('{bin} {nvm_file} {model} {out_dir} '
                           '-d {dataTerm} -o {outlierRemovalType} '
                           '-t {toneMapping} '
                           '{intermediate} '
                           '{skipGlobalSeamLeveling} '
                           '{skipLocalSeamLeveling} '
                           '{nadirMode} '
                           '{labelingFile} '.format(**kwargs))

                progress += progress_per_run
                self.update_progress(progress)
            else:
                log.ODM_WARNING('Found a valid ODM Texture file in: %s' %
                                odm_textured_model_obj)

        if args.optimize_disk_space:
            for r in nonloc.runs:
                if io.file_exists(r['model']):
                    os.remove(r['model'])

            undistorted_images_path = os.path.join(tree.opensfm, "undistorted",
                                                   "images")
            if io.dir_exists(undistorted_images_path):
                shutil.rmtree(undistorted_images_path)
Example 3
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']

        doPointCloudGeo = True
        transformPointCloud = True
        verbose = '-verbose' if self.params.get('verbose') else ''

        class nonloc:
            runs = []

        def add_run(primary=True, band=None):
            subdir = ""
            if not primary and band is not None:
                subdir = band

            # Make sure the 2.5D mesh is georeferenced before the 3D mesh,
            # because it will be used to calculate a transform
            # for the point cloud. If we use the 3D model transform,
            # DEMs and orthophoto might not align!
            if not args.use_3dmesh:
                nonloc.runs += [{
                    'georeferencing_dir': os.path.join(tree.odm_25dgeoreferencing, subdir),
                    'texturing_dir': os.path.join(tree.odm_25dtexturing, subdir),
                }]

            if not args.skip_3dmodel and (primary or args.use_3dmesh):
                nonloc.runs += [{
                    'georeferencing_dir': tree.odm_georeferencing,
                    'texturing_dir': os.path.join(tree.odm_texturing, subdir),
                }]

        if reconstruction.multi_camera:
            for band in reconstruction.multi_camera:
                primary = band['name'] == get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                add_run(primary, band['name'].lower())
        else:
            add_run()

        progress_per_run = 100.0 / len(nonloc.runs)
        progress = 0.0

        for r in nonloc.runs:
            if not io.dir_exists(r['georeferencing_dir']):
                system.mkdir_p(r['georeferencing_dir'])

            odm_georeferencing_model_obj_geo = os.path.join(
                r['texturing_dir'], tree.odm_georeferencing_model_obj_geo)
            odm_georeferencing_model_obj = os.path.join(
                r['texturing_dir'], tree.odm_textured_model_obj)
            odm_georeferencing_log = os.path.join(r['georeferencing_dir'],
                                                  tree.odm_georeferencing_log)
            odm_georeferencing_transform_file = os.path.join(
                r['georeferencing_dir'],
                tree.odm_georeferencing_transform_file)
            odm_georeferencing_model_txt_geo_file = os.path.join(
                r['georeferencing_dir'], tree.odm_georeferencing_model_txt_geo)

            if not io.file_exists(odm_georeferencing_model_obj_geo) or \
               not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():

                # odm_georeference definitions
                kwargs = {
                    'bin': context.odm_modules_path,
                    'input_pc_file': tree.filtered_point_cloud,
                    'bundle': tree.opensfm_bundle,
                    'imgs': tree.dataset_raw,
                    'imgs_list': tree.opensfm_bundle_list,
                    'model': odm_georeferencing_model_obj,
                    'log': odm_georeferencing_log,
                    'input_trans_file': tree.opensfm_transformation,
                    'transform_file': odm_georeferencing_transform_file,
                    'coords': tree.odm_georeferencing_coords,
                    'output_pc_file': tree.odm_georeferencing_model_laz,
                    'geo_sys': odm_georeferencing_model_txt_geo_file,
                    'model_geo': odm_georeferencing_model_obj_geo,
                    'verbose': verbose
                }

                if transformPointCloud:
                    kwargs['pc_params'] = '-inputPointCloudFile {input_pc_file} -outputPointCloudFile {output_pc_file}'.format(**kwargs)

                    if reconstruction.is_georeferenced():
                        kwargs['pc_params'] += ' -outputPointCloudSrs %s' % pipes.quote(reconstruction.georef.proj4())
                    else:
                        log.ODM_WARNING('NO SRS: The output point cloud will not have a SRS.')
                else:
                    kwargs['pc_params'] = ''

                if io.file_exists(tree.opensfm_transformation) and io.file_exists(tree.odm_georeferencing_coords):
                    log.ODM_INFO('Running georeferencing with OpenSfM transformation matrix')
                    system.run(
                        '{bin}/odm_georef -bundleFile {bundle} -inputTransformFile {input_trans_file} -inputCoordFile {coords} '
                        '-inputFile {model} -outputFile {model_geo} '
                        '{pc_params} {verbose} '
                        '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'
                        .format(**kwargs))
                elif io.file_exists(tree.odm_georeferencing_coords):
                    log.ODM_INFO(
                        'Running georeferencing with generated coords file.')
                    system.run(
                        '{bin}/odm_georef -bundleFile {bundle} -inputCoordFile {coords} '
                        '-inputFile {model} -outputFile {model_geo} '
                        '{pc_params} {verbose} '
                        '-logFile {log} -outputTransformFile {transform_file} -georefFileOutputPath {geo_sys}'
                        .format(**kwargs))
                else:
                    log.ODM_WARNING(
                        'Georeferencing failed. Make sure your '
                        'photos have geotags in the EXIF or you have '
                        'provided a GCP file. ')
                    doPointCloudGeo = False  # skip the rest of the georeferencing

                if doPointCloudGeo:
                    reconstruction.georef.extract_offsets(
                        odm_georeferencing_model_txt_geo_file)
                    point_cloud.post_point_cloud_steps(args, tree)

                    if args.crop > 0:
                        log.ODM_INFO(
                            "Calculating cropping area and generating bounds shapefile from point cloud"
                        )
                        cropper = Cropper(tree.odm_georeferencing,
                                          'odm_georeferenced_model')

                        if args.fast_orthophoto:
                            decimation_step = 10
                        else:
                            decimation_step = 40

                        # More aggressive decimation for large datasets
                        if not args.fast_orthophoto:
                            decimation_step *= int(
                                len(reconstruction.photos) / 1000) + 1
                            decimation_step = min(decimation_step, 95)

                        try:
                            cropper.create_bounds_gpkg(
                                tree.odm_georeferencing_model_laz,
                                args.crop,
                                decimation_step=decimation_step)
                        except:
                            log.ODM_WARNING(
                                "Cannot calculate crop bounds! We will skip cropping"
                            )
                            args.crop = 0

                    # Do not execute a second time, since
                    # we might be doing georeferencing for
                    # multiple models (3D, 2.5D, ...)
                    doPointCloudGeo = False
                    transformPointCloud = False
            else:
                log.ODM_WARNING('Found a valid georeferenced model in: %s' %
                                tree.odm_georeferencing_model_laz)

            if args.optimize_disk_space and io.file_exists(
                    tree.odm_georeferencing_model_laz) and io.file_exists(
                        tree.filtered_point_cloud):
                os.remove(tree.filtered_point_cloud)

            progress += progress_per_run
            self.update_progress(progress)
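
Examples 1 through 3 all rely on the `class nonloc` idiom: a class body acts as a small mutable namespace, so the nested add_run function can rebind nonloc.runs without the Python 3 `nonlocal` keyword (which is unavailable on Python 2). A minimal sketch of the idiom follows; collect_runs is illustrative only, not ODM code.

def collect_runs(bands):
    class nonloc:
        runs = []

    def add_run(band):
        # Rebinding an attribute of the holder avoids rebinding a name in the enclosing scope.
        nonloc.runs += [{'band': band}]

    for band in bands:
        add_run(band)
    return nonloc.runs

print(collect_runs(['red', 'nir']))  # [{'band': 'red'}, {'band': 'nir'}]
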
Example 4
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        photos = reconstruction.photos

        if not photos:
            raise system.ExitException(
                'Not enough photos in photos array to start OpenSfM')

        octx = OSFMContext(tree.opensfm)
        octx.setup(args,
                   tree.dataset_raw,
                   reconstruction=reconstruction,
                   rerun=self.rerun())
        octx.photos_to_metadata(photos, self.rerun())
        self.update_progress(20)
        octx.feature_matching(self.rerun())
        self.update_progress(30)
        octx.reconstruct(self.rerun())
        octx.extract_cameras(tree.path("cameras.json"), self.rerun())
        self.update_progress(70)

        def cleanup_disk_space():
            if args.optimize_disk_space:
                for folder in ["features", "matches", "reports"]:
                    folder_path = octx.path(folder)
                    if os.path.exists(folder_path):
                        if os.path.islink(folder_path):
                            os.unlink(folder_path)
                        else:
                            shutil.rmtree(folder_path)

        # If we find a special flag file for split/merge we stop right here
        if os.path.exists(octx.path("split_merge_stop_at_reconstruction.txt")):
            log.ODM_INFO("Stopping OpenSfM early because we found: %s" %
                         octx.path("split_merge_stop_at_reconstruction.txt"))
            self.next_stage = None
            cleanup_disk_space()
            return

        # Stats are computed in the local CRS (before geoprojection)
        if not args.skip_report:

            # TODO: this will fail to compute proper statistics if
            # the pipeline is run with --skip-report and is subsequently
            # rerun without --skip-report and a --rerun-* parameter (due to the
            # reconstruction.json file being replaced below). It's an isolated use case.

            octx.export_stats(self.rerun())

        self.update_progress(75)

        # We now switch to a geographic CRS
        if reconstruction.is_georeferenced() and (not io.file_exists(
                tree.opensfm_topocentric_reconstruction) or self.rerun()):
            octx.run(
                'export_geocoords --reconstruction --proj "%s" --offset-x %s --offset-y %s'
                % (reconstruction.georef.proj4(),
                   reconstruction.georef.utm_east_offset,
                   reconstruction.georef.utm_north_offset))
            shutil.move(tree.opensfm_reconstruction,
                        tree.opensfm_topocentric_reconstruction)
            shutil.move(tree.opensfm_geocoords_reconstruction,
                        tree.opensfm_reconstruction)
        else:
            log.ODM_WARNING("Will skip exporting %s" %
                            tree.opensfm_geocoords_reconstruction)

        self.update_progress(80)

        updated_config_flag_file = octx.path('updated_config.txt')

        # Make sure it's capped by the depthmap-resolution arg,
        # since the undistorted images are used for MVS
        outputs['undist_image_max_size'] = max(
            gsd.image_max_size(photos,
                               args.orthophoto_resolution,
                               tree.opensfm_reconstruction,
                               ignore_gsd=args.ignore_gsd,
                               has_gcp=reconstruction.has_gcp()),
            get_depthmap_resolution(args, photos))

        if not io.file_exists(updated_config_flag_file) or self.rerun():
            octx.update_config({
                'undistorted_image_max_size': outputs['undist_image_max_size']
            })
            octx.touch(updated_config_flag_file)

        # Undistorted images will be used for texturing / MVS

        alignment_info = None
        primary_band_name = None
        largest_photo = None
        undistort_pipeline = []

        def undistort_callback(shot_id, image):
            for func in undistort_pipeline:
                image = func(shot_id, image)
            return image

        def resize_thermal_images(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.resize_to_match(image, largest_photo)
            else:
                return image

        def radiometric_calibrate(shot_id, image):
            photo = reconstruction.get_photo(shot_id)
            if photo.is_thermal():
                return thermal.dn_to_temperature(photo, image,
                                                 tree.dataset_raw)
            else:
                return multispectral.dn_to_reflectance(
                    photo,
                    image,
                    use_sun_sensor=args.radiometric_calibration == "camera+sun")

        def align_to_primary_band(shot_id, image):
            photo = reconstruction.get_photo(shot_id)

            # No need to align if requested by user
            if args.skip_band_alignment:
                return image

            # No need to align primary
            if photo.band_name == primary_band_name:
                return image

            ainfo = alignment_info.get(photo.band_name)
            if ainfo is not None:
                return multispectral.align_image(image, ainfo['warp_matrix'],
                                                 ainfo['dimension'])
            else:
                log.ODM_WARNING(
                    "Cannot align %s, no alignment matrix could be computed. Band alignment quality might be affected."
                    % (shot_id))
                return image

        if reconstruction.multi_camera:
            largest_photo = find_largest_photo(photos)
            undistort_pipeline.append(resize_thermal_images)

        if args.radiometric_calibration != "none":
            undistort_pipeline.append(radiometric_calibrate)

        image_list_override = None

        if reconstruction.multi_camera:

            # Undistort only secondary bands
            image_list_override = [
                os.path.join(tree.dataset_raw, p.filename) for p in photos
            ]  # if p.band_name.lower() != primary_band_name.lower()

            # We backup the original reconstruction.json, tracks.csv
            # then we augment them by duplicating the primary band
            # camera shots with each band, so that exports, undistortion,
            # etc. include all bands
            # We finally restore the original files later

            added_shots_file = octx.path('added_shots_done.txt')
            s2p, p2s = None, None

            if not io.file_exists(added_shots_file) or self.rerun():
                primary_band_name = multispectral.get_primary_band_name(
                    reconstruction.multi_camera, args.primary_band)
                s2p, p2s = multispectral.compute_band_maps(
                    reconstruction.multi_camera, primary_band_name)

                if not args.skip_band_alignment:
                    alignment_info = multispectral.compute_alignment_matrices(
                        reconstruction.multi_camera,
                        primary_band_name,
                        tree.dataset_raw,
                        s2p,
                        p2s,
                        max_concurrency=args.max_concurrency)
                else:
                    log.ODM_WARNING("Skipping band alignment")
                    alignment_info = {}

                log.ODM_INFO("Adding shots to reconstruction")

                octx.backup_reconstruction()
                octx.add_shots_to_reconstruction(p2s)
                octx.touch(added_shots_file)

            undistort_pipeline.append(align_to_primary_band)

        octx.convert_and_undistort(self.rerun(), undistort_callback,
                                   image_list_override)

        self.update_progress(95)

        if reconstruction.multi_camera:
            octx.restore_reconstruction_backup()

            # Undistort primary band and write undistorted
            # reconstruction.json, tracks.csv
            octx.convert_and_undistort(self.rerun(),
                                       undistort_callback,
                                       runId='primary')

        if not io.file_exists(tree.opensfm_reconstruction_nvm) or self.rerun():
            octx.run('export_visualsfm --points')
        else:
            log.ODM_WARNING(
                'Found a valid OpenSfM NVM reconstruction file in: %s' %
                tree.opensfm_reconstruction_nvm)

        if reconstruction.multi_camera:
            log.ODM_INFO("Multiple bands found")

            # Write NVM files for the various bands
            for band in reconstruction.multi_camera:
                nvm_file = octx.path(
                    "undistorted",
                    "reconstruction_%s.nvm" % band['name'].lower())

                if not io.file_exists(nvm_file) or self.rerun():
                    img_map = {}

                    if primary_band_name is None:
                        primary_band_name = multispectral.get_primary_band_name(
                            reconstruction.multi_camera, args.primary_band)
                    if p2s is None:
                        s2p, p2s = multispectral.compute_band_maps(
                            reconstruction.multi_camera, primary_band_name)

                    for fname in p2s:

                        # Primary band maps to itself
                        if band['name'] == primary_band_name:
                            img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(fname, 'tif')
                        else:
                            band_filename = next((p.filename for p in p2s[fname] if p.band_name == band['name']), None)

                            if band_filename is not None:
                                img_map[add_image_format_extension(fname, 'tif')] = add_image_format_extension(band_filename, 'tif')
                            else:
                                log.ODM_WARNING(
                                    "Cannot find %s band equivalent for %s" %
                                    (band, fname))

                    nvm.replace_nvm_images(tree.opensfm_reconstruction_nvm,
                                           img_map, nvm_file)
                else:
                    log.ODM_WARNING("Found existing NVM file %s" % nvm_file)

        # Skip dense reconstruction if necessary and export
        # sparse reconstruction instead
        if args.fast_orthophoto:
            output_file = octx.path('reconstruction.ply')

            if not io.file_exists(output_file) or self.rerun():
                octx.run('export_ply --no-cameras --point-num-views')
            else:
                log.ODM_WARNING("Found a valid PLY reconstruction in %s" %
                                output_file)

        cleanup_disk_space()

        if args.optimize_disk_space:
            os.remove(octx.path("tracks.csv"))
            if io.file_exists(octx.recon_backup_file()):
                os.remove(octx.recon_backup_file())

            if io.dir_exists(octx.path("undistorted", "depthmaps")):
                files = glob.glob(
                    octx.path("undistorted", "depthmaps", "*.npz"))
                for f in files:
                    os.remove(f)

            # Keep these if using OpenMVS
            if args.fast_orthophoto:
                files = [
                    octx.path("undistorted", "tracks.csv"),
                    octx.path("undistorted", "reconstruction.json")
                ]
                for f in files:
                    if os.path.exists(f):
                        os.remove(f)
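
Example 4 applies per-shot image processing through undistort_pipeline: each step is a function of (shot_id, image) returning a new image, and undistort_callback chains whichever steps were registered (thermal resizing, radiometric calibration, band alignment). A stripped-down sketch of that chaining, using toy integer "images" in place of arrays:

undistort_pipeline = []

def undistort_callback(shot_id, image):
    # Apply every registered step in order, feeding each result to the next.
    for func in undistort_pipeline:
        image = func(shot_id, image)
    return image

# Toy stand-ins for resize_thermal_images / radiometric_calibrate:
undistort_pipeline.append(lambda shot_id, image: image * 2)
undistort_pipeline.append(lambda shot_id, image: image + 1)

print(undistort_callback("shot_0001.tif", 10))  # (10 * 2) + 1 = 21
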
Example 5
    def process(self, args, outputs):
        tree = outputs['tree']
        reconstruction = outputs['reconstruction']
        verbose = '-verbose' if args.verbose else ''

        # define paths and create working directories
        system.mkdir_p(tree.odm_orthophoto)

        if not io.file_exists(tree.odm_orthophoto_tif) or self.rerun():
            gsd_error_estimate = 0.1
            ignore_resolution = False
            if not reconstruction.is_georeferenced():
                # Match DEMs
                gsd_error_estimate = -3
                ignore_resolution = True

            resolution = 1.0 / (
                gsd.cap_resolution(args.orthophoto_resolution,
                                   tree.opensfm_reconstruction,
                                   gsd_error_estimate=gsd_error_estimate,
                                   ignore_gsd=args.ignore_gsd,
                                   ignore_resolution=ignore_resolution,
                                   has_gcp=reconstruction.has_gcp()) / 100.0)

            # odm_orthophoto definitions
            kwargs = {
                'bin': context.odm_modules_path,
                'log': tree.odm_orthophoto_log,
                'ortho': tree.odm_orthophoto_render,
                'corners': tree.odm_orthophoto_corners,
                'res': resolution,
                'bands': '',
                'verbose': verbose
            }

            models = []

            if args.use_3dmesh:
                base_dir = tree.odm_texturing
            else:
                base_dir = tree.odm_25dtexturing

            model_file = tree.odm_textured_model_obj

            if reconstruction.multi_camera:
                for band in reconstruction.multi_camera:
                    primary = band['name'] == get_primary_band_name(
                        reconstruction.multi_camera, args.primary_band)
                    subdir = ""
                    if not primary:
                        subdir = band['name'].lower()
                    models.append(os.path.join(base_dir, subdir, model_file))
                kwargs['bands'] = '-bands %s' % (','.join(
                    [quote(b['name']) for b in reconstruction.multi_camera]))
            else:
                models.append(os.path.join(base_dir, model_file))

            kwargs['models'] = ','.join(map(quote, models))

            # run odm_orthophoto
            system.run(
                '{bin}/odm_orthophoto -inputFiles {models} '
                '-logFile {log} -outputFile {ortho} -resolution {res} {verbose} '
                '-outputCornerFile {corners} {bands}'.format(**kwargs))

            # Create georeferenced GeoTiff
            geotiffcreated = False

            if reconstruction.is_georeferenced():
                ulx = uly = lrx = lry = 0.0
                with open(tree.odm_orthophoto_corners) as f:
                    for lineNumber, line in enumerate(f):
                        if lineNumber == 0:
                            tokens = line.split(' ')
                            if len(tokens) == 4:
                                ulx = float(tokens[0]) + \
                                      float(reconstruction.georef.utm_east_offset)
                                lry = float(tokens[1]) + \
                                      float(reconstruction.georef.utm_north_offset)
                                lrx = float(tokens[2]) + \
                                      float(reconstruction.georef.utm_east_offset)
                                uly = float(tokens[3]) + \
                                      float(reconstruction.georef.utm_north_offset)
                log.ODM_INFO('Creating GeoTIFF')

                orthophoto_vars = orthophoto.get_orthophoto_vars(args)

                kwargs = {
                    'ulx': ulx,
                    'uly': uly,
                    'lrx': lrx,
                    'lry': lry,
                    'vars': ' '.join(['-co %s=%s' % (k, orthophoto_vars[k]) for k in orthophoto_vars]),
                    'proj': reconstruction.georef.proj4(),
                    'input': tree.odm_orthophoto_render,
                    'output': tree.odm_orthophoto_tif,
                    'log': tree.odm_orthophoto_tif_log,
                    'max_memory': get_max_memory(),
                }

                system.run('gdal_translate -a_ullr {ulx} {uly} {lrx} {lry} '
                           '{vars} '
                           '-a_srs \"{proj}\" '
                           '--config GDAL_CACHEMAX {max_memory}% '
                           '--config GDAL_TIFF_INTERNAL_MASK YES '
                           '{input} {output} > {log}'.format(**kwargs))

                bounds_file_path = os.path.join(
                    tree.odm_georeferencing,
                    'odm_georeferenced_model.bounds.gpkg')

                # Cutline computation, before cropping
                # We want to use the full orthophoto, not the cropped one.
                if args.orthophoto_cutline:
                    cutline_file = os.path.join(tree.odm_orthophoto,
                                                "cutline.gpkg")

                    compute_cutline(tree.odm_orthophoto_tif,
                                    bounds_file_path,
                                    cutline_file,
                                    args.max_concurrency,
                                    tmpdir=os.path.join(
                                        tree.odm_orthophoto,
                                        "grass_cutline_tmpdir"),
                                    scale=0.25)

                    orthophoto.compute_mask_raster(
                        tree.odm_orthophoto_tif,
                        cutline_file,
                        os.path.join(tree.odm_orthophoto,
                                     "odm_orthophoto_cut.tif"),
                        blend_distance=20,
                        only_max_coords_feature=True)

                orthophoto.post_orthophoto_steps(args, bounds_file_path,
                                                 tree.odm_orthophoto_tif,
                                                 tree.orthophoto_tiles)

                # Generate feathered orthophoto also
                if args.orthophoto_cutline:
                    orthophoto.feather_raster(
                        tree.odm_orthophoto_tif,
                        os.path.join(tree.odm_orthophoto,
                                     "odm_orthophoto_feathered.tif"),
                        blend_distance=20)

                geotiffcreated = True
            if not geotiffcreated:
                if io.file_exists(tree.odm_orthophoto_render):
                    pseudogeo.add_pseudo_georeferencing(
                        tree.odm_orthophoto_render)
                    log.ODM_INFO(
                        "Renaming %s --> %s" %
                        (tree.odm_orthophoto_render, tree.odm_orthophoto_tif))
                    os.rename(tree.odm_orthophoto_render,
                              tree.odm_orthophoto_tif)
                else:
                    log.ODM_WARNING(
                        "Could not generate an orthophoto (it did not render)")
        else:
            log.ODM_WARNING('Found a valid orthophoto in: %s' %
                            tree.odm_orthophoto_tif)

        if args.optimize_disk_space and io.file_exists(
                tree.odm_orthophoto_render):
            os.remove(tree.odm_orthophoto_render)
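
Example 5 georeferences the rendered orthophoto by reading the first line of the corners file (which, as parsed above, holds xmin, ymin, xmax, ymax in local coordinates), adding the UTM offsets, and passing the result to gdal_translate -a_ullr. A small sketch of that bookkeeping; the file name and offset values below are made up for illustration and are not ODM defaults.

def read_ullr(corners_path, east_offset, north_offset):
    with open(corners_path) as f:
        xmin, ymin, xmax, ymax = map(float, f.readline().split())
    # -a_ullr expects upper-left x/y followed by lower-right x/y.
    return (xmin + east_offset, ymax + north_offset,
            xmax + east_offset, ymin + north_offset)

# ulx, uly, lrx, lry = read_ullr("odm_orthophoto_corners.txt", 576000.0, 5188000.0)
# system.run('gdal_translate -a_ullr %s %s %s %s ...' % (ulx, uly, lrx, lry))
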