Code example #1
File: commands.py  Project: zhongjiejiang/ODM
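# NOTE: snippet as extracted; it relies on stdlib os/math/datetime plus ODM helpers
# (log, point_cloud, pdal pipeline builders, run, get_max_memory, parallel_map,
# median_smoothing, io) that commands.py imports elsewhere.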
def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
                outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
                verbose=False, decimation=None, keep_unfilled_copy=False,
                apply_smoothing=True):
    """ Create DEM from multiple radii, and optionally gapfill """
    
    global error
    error = None

    start = datetime.now()

    if not os.path.exists(outdir):
        log.ODM_INFO("Creating %s" % outdir)
        os.mkdir(outdir)

    extent = point_cloud.get_extent(input_point_cloud)
    log.ODM_INFO("Point cloud bounds are [minx: %s, maxx: %s] [miny: %s, maxy: %s]" % (extent['minx'], extent['maxx'], extent['miny'], extent['maxy']))
    ext_width = extent['maxx'] - extent['minx']
    ext_height = extent['maxy'] - extent['miny']

    w, h = (int(math.ceil(ext_width / float(resolution))),
            int(math.ceil(ext_height / float(resolution))))

    # Set a floor, no matter the resolution parameter
    # (sometimes a wrongly estimated scale of the model can cause the resolution
    # to be set unrealistically low, causing errors)
    RES_FLOOR = 64
    if w < RES_FLOOR and h < RES_FLOOR:
        prev_w, prev_h = w, h
        
        if w >= h:
            w, h = (RES_FLOOR, int(math.ceil(ext_height / ext_width * RES_FLOOR)))
        else:
            w, h = (int(math.ceil(ext_width / ext_height * RES_FLOOR)), RES_FLOOR)
        
        floor_ratio = prev_w / float(w)
        resolution *= floor_ratio
        radiuses = [str(float(r) * floor_ratio) for r in radiuses]

        log.ODM_WARNING("Really low resolution DEM requested %s will set floor at %s pixels. Resolution changed to %s. The scale of this reconstruction might be off." % ((prev_w, prev_h), RES_FLOOR, resolution))
        
    final_dem_pixels = w * h

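    # Number of splits per axis: ceil(log2(total pixels / max tile pixels)),
    # clamped to at least 1, so the DEM is generated as num_splits^2 tiles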
    num_splits = int(max(1, math.ceil(math.log(math.ceil(final_dem_pixels / float(max_tile_size * max_tile_size)))/math.log(2))))
    num_tiles = num_splits * num_splits
    log.ODM_INFO("DEM resolution is %s, max tile size is %s, will split DEM generation into %s tiles" % ((h, w), max_tile_size, num_tiles))

    tile_bounds_width = ext_width / float(num_splits)
    tile_bounds_height = ext_height / float(num_splits)

    tiles = []

    for r in radiuses:
        minx = extent['minx']

        for x in range(num_splits):
            miny = extent['miny']
            if x == num_splits - 1:
                maxx = extent['maxx']
            else:
                maxx = minx + tile_bounds_width

            for y in range(num_splits):
                if y == num_splits - 1:
                    maxy = extent['maxy']
                else:
                    maxy = miny + tile_bounds_height

                filename = os.path.join(os.path.abspath(outdir), '%s_r%s_x%s_y%s.tif' % (dem_type, r, x, y))

                tiles.append({
                    'radius': r,
                    'bounds': {
                        'minx': minx,
                        'maxx': maxx,
                        'miny': miny,
                        'maxy': maxy 
                    },
                    'filename': filename
                })

                miny = maxy
            minx = maxx

    # Sort tiles by decreasing radius; gdalbuildvrt gives precedence to later
    # sources, so the finest (smallest radius) tiles win in overlapping areas
    tiles.sort(key=lambda t: float(t['radius']), reverse=True)

    def process_tile(q):
        log.ODM_INFO("Generating %s (%s, radius: %s, resolution: %s)" % (q['filename'], output_type, q['radius'], resolution))
        
        d = pdal.json_gdal_base(q['filename'], output_type, q['radius'], resolution, q['bounds'])

        if dem_type == 'dtm':
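            # Keep only ground-classified points (ASPRS class 2) when building a terrain model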
            d = pdal.json_add_classification_filter(d, 2)

        if decimation is not None:
            d = pdal.json_add_decimation_filter(d, decimation)

        pdal.json_add_readers(d, [input_point_cloud])
        pdal.run_pipeline(d, verbose=verbose)

    parallel_map(process_tile, tiles, max_workers)

    output_file = "%s.tif" % dem_type
    output_path = os.path.abspath(os.path.join(outdir, output_file))

    # Verify tile results
    for t in tiles: 
        if not os.path.exists(t['filename']):
            raise Exception("Error creating %s, %s failed to be created" % (output_file, t['filename']))
    
    # Create virtual raster
    tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
    run('gdalbuildvrt "%s" "%s"' % (tiles_vrt_path, '" "'.join(map(lambda t: t['filename'], tiles))))

    merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
    geotiff_tmp_path = os.path.abspath(os.path.join(outdir, 'tiles.tmp.tif'))
    geotiff_small_path = os.path.abspath(os.path.join(outdir, 'tiles.small.tif'))
    geotiff_small_filled_path = os.path.abspath(os.path.join(outdir, 'tiles.small_filled.tif'))
    geotiff_path = os.path.abspath(os.path.join(outdir, 'tiles.tif'))

    # Build GeoTIFF
    kwargs = {
        'max_memory': get_max_memory(),
        'threads': max_workers if max_workers else 'ALL_CPUS',
        'tiles_vrt': tiles_vrt_path,
        'merged_vrt': merged_vrt_path,
        'geotiff': geotiff_path,
        'geotiff_tmp': geotiff_tmp_path,
        'geotiff_small': geotiff_small_path,
        'geotiff_small_filled': geotiff_small_filled_path
    }

    if gapfill:
        # Sometimes, for some reason, gdal_fillnodata.py behaves strangely
        # when reading data directly from a .VRT, so we convert
        # to GeoTIFF first.
        run('gdal_translate '
                '-co NUM_THREADS={threads} '
                '--config GDAL_CACHEMAX {max_memory}% '
                '{tiles_vrt} {geotiff_tmp}'.format(**kwargs))

        # Scale to 10% size
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '--config GDAL_CACHEMAX {max_memory}% '
            '-outsize 10% 0 '
            '{geotiff_tmp} {geotiff_small}'.format(**kwargs))

        # Fill scaled
        run('gdal_fillnodata.py '
            '-co NUM_THREADS={threads} '
            '--config GDAL_CACHEMAX {max_memory}% '
            '-b 1 '
            '-of GTiff '
            '{geotiff_small} {geotiff_small_filled}'.format(**kwargs))

        # Merge filled scaled DEM with unfilled DEM using bilinear interpolation
        run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, geotiff_tmp_path))
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '-co TILED=YES '
            '-co COMPRESS=DEFLATE '
            '--config GDAL_CACHEMAX {max_memory}% '
            '{merged_vrt} {geotiff}'.format(**kwargs))
    else:
        run('gdal_translate '
                '-co NUM_THREADS={threads} '
                '-co TILED=YES '
                '-co COMPRESS=DEFLATE '
                '--config GDAL_CACHEMAX {max_memory}% '
                '{tiles_vrt} {geotiff}'.format(**kwargs))

    if apply_smoothing:
        median_smoothing(geotiff_path, output_path)
        os.remove(geotiff_path)
    else:
        os.rename(geotiff_path, output_path)

    if os.path.exists(geotiff_tmp_path):
        if not keep_unfilled_copy: 
            os.remove(geotiff_tmp_path)
        else:
            os.rename(geotiff_tmp_path, io.related_file_path(output_path, postfix=".unfilled"))
    
    for cleanup_file in [tiles_vrt_path, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
        if os.path.exists(cleanup_file): os.remove(cleanup_file)
    for t in tiles:
        if os.path.exists(t['filename']): os.remove(t['filename'])
    
    log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start))
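
A minimal invocation sketch for create_dem (hypothetical paths and values, not from the project):

# Build a 5 cm/pixel DSM from a georeferenced point cloud, gap-filling holes
# and applying median smoothing; the output lands in odm_dem/dsm.tif.
create_dem("odm_georeferenced_model.laz", "dsm",
           output_type='max', radiuses=['0.1', '0.2'],
           gapfill=True, outdir="odm_dem", resolution=0.05,
           max_workers=4, apply_smoothing=True)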
Code example #2
File: dataset.py  Project: MobileScientists/ODM
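# NOTE: snippet as extracted; it assumes dataset.py's own imports (os, system, io, types,
# context, log, GeoFile, PhotoCorruptedException, ai, SkyFilter, boundary, parallel_map,
# save_images_database, load_images_database).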
    def process(self, args, outputs):
        outputs['start_time'] = system.now_raw()
        tree = types.ODM_Tree(args.project_path, args.gcp, args.geo)
        outputs['tree'] = tree

        if args.time and io.file_exists(tree.benchmarking):
            # Delete the previously made file
            os.remove(tree.benchmarking)
            with open(tree.benchmarking, 'a') as b:
                b.write('ODM Benchmarking file created %s\nNumber of Cores: %s\n\n' % (system.now(), context.num_cores))
    
        # check if the image filename is supported
        def valid_image_filename(filename):
            (pathfn, ext) = os.path.splitext(filename)
            return ext.lower() in context.supported_extensions and pathfn[-5:] != "_mask"

        # Get supported images from dir
        def get_images(in_dir):
            log.ODM_DEBUG(in_dir)
            entries = os.listdir(in_dir)
            valid, rejects = [], []
            for f in entries:
                if valid_image_filename(f):
                    valid.append(f)
                else:
                    rejects.append(f)
            return valid, rejects

        def find_mask(photo_path, masks):
            (pathfn, ext) = os.path.splitext(os.path.basename(photo_path))
            k = "{}_mask".format(pathfn)
            
            mask = masks.get(k)
            if mask:
                # Spaces are not supported due to OpenSfM's mask_list.txt format requirements
                if " " not in mask:
                    return mask
                else:
                    log.ODM_WARNING("Image mask {} has a space. Spaces are currently not supported for image masks.".format(mask))

        # get images directory
        images_dir = tree.dataset_raw

        # define paths and create working directories
        system.mkdir_p(tree.odm_georeferencing)

        log.ODM_INFO('Loading dataset from: %s' % images_dir)

        # check whether we need to rerun this stage
        images_database_file = os.path.join(tree.root_path, 'images.json')
        if not io.file_exists(images_database_file) or self.rerun():
            if not os.path.exists(images_dir):
                raise system.ExitException("There are no images in %s! Make sure that your project path and dataset name are correct. The current project path is set to: %s" % (images_dir, args.project_path))

            files, rejects = get_images(images_dir)
            if files:
                # create ODMPhoto list
                path_files = [os.path.join(images_dir, f) for f in files]

                # Lookup table for masks
                masks = {}
                for r in rejects:
                    (p, ext) = os.path.splitext(r)
                    if p[-5:] == "_mask" and ext.lower() in context.supported_extensions:
                        masks[p] = r

                photos = []
                with open(tree.dataset_list, 'w') as dataset_list:
                    log.ODM_INFO("Loading %s images" % len(path_files))
                    for f in path_files:
                        try:
                            p = types.ODM_Photo(f)
                            p.set_mask(find_mask(f, masks))
                            photos.append(p)
                            dataset_list.write(photos[-1].filename + '\n')
                        except PhotoCorruptedException:
                            log.ODM_WARNING("%s seems corrupted and will not be used" % os.path.basename(f))

                # Check if a geo file is available
                if tree.odm_geo_file is not None and os.path.isfile(tree.odm_geo_file):
                    log.ODM_INFO("Found image geolocation file")
                    gf = GeoFile(tree.odm_geo_file)
                    updated = 0
                    for p in photos:
                        entry = gf.get_entry(p.filename)
                        if entry:
                            p.update_with_geo_entry(entry)
                            p.compute_opk()
                            updated += 1
                    log.ODM_INFO("Updated %s image positions" % updated)

                # GPSDOP override if we have GPS accuracy information (such as RTK)
                if 'gps_accuracy_is_set' in args:
                    log.ODM_INFO("Forcing GPS DOP to %s for all images" % args.gps_accuracy)

                    for p in photos:
                        p.override_gps_dop(args.gps_accuracy)
                
                # Override projection type
                if args.camera_lens != "auto":
                    log.ODM_INFO("Setting camera lens to %s for all images" % args.camera_lens)

                    for p in photos:
                        p.override_camera_projection(args.camera_lens)

                # Automatic sky removal
                if args.sky_removal:
                    # For each image that:
                    #  - doesn't already have a mask, AND
                    #  - is not nadir (or has no orientation info), AND
                    #  - has no spaces in its filename (OpenSfM requirement),
                    # automatically generate a sky mask
                    
                    # Generate list of sky images
                    sky_images = []
                    for p in photos:
                        if p.mask is None and (p.pitch is None or (abs(p.pitch) > 20)) and (" " not in p.filename):
                            sky_images.append({'file': os.path.join(images_dir, p.filename), 'p': p})

                    if len(sky_images) > 0:
                        log.ODM_INFO("Automatically generating sky masks for %s images" % len(sky_images))
                        model = ai.get_model("skyremoval", "https://github.com/OpenDroneMap/SkyRemoval/releases/download/v1.0.5/model.zip", "v1.0.5")
                        if model is not None:
                            sf = SkyFilter(model=model)

                            def parallel_sky_filter(item):
                                try:
                                    mask_file = sf.run_img(item['file'], images_dir)

                                    # Check and set
                                    if mask_file is not None and os.path.isfile(mask_file):
                                        item['p'].set_mask(os.path.basename(mask_file))
                                        log.ODM_INFO("Wrote %s" % os.path.basename(mask_file))
                                    else:
                                        log.ODM_WARNING("Cannot generate mask for %s" % img)
                                except Exception as e:
                                    log.ODM_WARNING("Cannot generate mask for %s: %s" % (img, str(e)))

                            parallel_map(parallel_sky_filter, sky_images, max_workers=args.max_concurrency)

                            log.ODM_INFO("Sky masks generation completed!")
                        else:
                            log.ODM_WARNING("Cannot load AI model (you might need to be connected to the internet?)")
                    else:
                        log.ODM_INFO("No sky masks will be generated (masks already provided, or images are nadir)")

                # End sky removal

                # Save image database for faster restart
                save_images_database(photos, images_database_file)
            else:
                raise system.ExitException('Not enough supported images in %s' % images_dir)
        else:
            # We have an images database, just load it
            photos = load_images_database(images_database_file)

        log.ODM_INFO('Found %s usable images' % len(photos))
        log.logger.log_json_images(len(photos))

        # Create reconstruction object
        reconstruction = types.ODM_Reconstruction(photos)
        
        if tree.odm_georeferencing_gcp and not args.use_exif:
            reconstruction.georeference_with_gcp(tree.odm_georeferencing_gcp,
                                                 tree.odm_georeferencing_coords,
                                                 tree.odm_georeferencing_gcp_utm,
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())
        else:
            reconstruction.georeference_with_gps(tree.dataset_raw, 
                                                 tree.odm_georeferencing_coords, 
                                                 tree.odm_georeferencing_model_txt_geo,
                                                 rerun=self.rerun())
        
        reconstruction.save_proj_srs(os.path.join(tree.odm_georeferencing, tree.odm_georeferencing_proj))
        outputs['reconstruction'] = reconstruction

        # Try to load boundaries
        if args.boundary:
            if reconstruction.is_georeferenced():
                outputs['boundary'] = boundary.load_boundary(args.boundary, reconstruction.get_proj_srs())
            else:
                args.boundary = None
                log.ODM_WARNING("Reconstruction is not georeferenced, but boundary file provided (will ignore boundary file)")

        # If sfm-algorithm is triangulation, check if photos have OPK
        if args.sfm_algorithm == 'triangulation':
            for p in photos:
                if not p.has_opk():
                    log.ODM_WARNING("No omega/phi/kappa angles found in input photos (%s), switching sfm-algorithm to incremental" % p.filename)
                    args.sfm_algorithm = 'incremental'
                    break
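
How the mask pairing above works in isolation, as a small sketch (hypothetical filenames; the "<stem>_mask" convention matches valid_image_filename and find_mask):

import os

# "IMG_0001_mask.JPG" is rejected as an input image but recorded as the mask
# for "IMG_0001.JPG"; find_mask() then looks it up under the "<stem>_mask" key.
entries = ["IMG_0001.JPG", "IMG_0001_mask.JPG", "IMG_0002.JPG"]
masks = {}
for f in entries:
    stem, ext = os.path.splitext(f)
    if stem.endswith("_mask"):
        masks[stem] = f
# masks == {"IMG_0001_mask": "IMG_0001_mask.JPG"}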
Code example #3
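# NOTE: snippet as extracted; it relies on os, sys, shutil plus ODM helpers
# (log, io, system, ply_info, split, fast_merge_ply, parallel_map).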
def filter(input_point_cloud, output_point_cloud, standard_deviation=2.5, meank=16, sample_radius=0, verbose=False, max_concurrency=1):
    """
    Filters a point cloud
    """
    if not os.path.exists(input_point_cloud):
        log.ODM_ERROR("{} does not exist. The program will now exit.".format(input_point_cloud))
        sys.exit(1)

    if (standard_deviation <= 0 or meank <= 0) and sample_radius <= 0:
        log.ODM_INFO("Skipping point cloud filtering")
        # when using the option `--pc-filter 0`, we still need to copy input_point_cloud to the output path
        shutil.copy(input_point_cloud, output_point_cloud)
        return

    filters = []

    if sample_radius > 0:
        log.ODM_INFO("Sampling points around a %sm radius" % sample_radius)
        filters.append('sample')

    if standard_deviation > 0 and meank > 0:
        log.ODM_INFO("Filtering {} (statistical, meanK {}, standard deviation {})".format(input_point_cloud, meank, standard_deviation))
        filters.append('outlier')

    if len(filters) > 0:
        filters.append('range')

    info = ply_info(input_point_cloud)
    dims = "x=float,y=float,z=float,"
    if info['has_normals']:
        dims += "nx=float,ny=float,nz=float,"
    dims += "red=uchar,blue=uchar,green=uchar"
    if info['has_views']:
        dims += ",views=uchar"

    if info['vertex_count'] == 0:
        log.ODM_ERROR("Cannot read vertex count for {}".format(input_point_cloud))
        sys.exit(1)

    # Do we need to split this?
    VERTEX_THRESHOLD = 250000
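    # Split only when parallelism is available and the cloud holds at least two
    # chunks' worth of points; otherwise a single PDAL run is cheaper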
    should_split = max_concurrency > 1 and info['vertex_count'] > VERTEX_THRESHOLD*2

    if should_split:
        partsdir = os.path.join(os.path.dirname(output_point_cloud), "parts")
        if os.path.exists(partsdir):
            log.ODM_WARNING("Removing existing directory %s" % partsdir)
            shutil.rmtree(partsdir)

        point_cloud_submodels = split(input_point_cloud, partsdir, "part.ply", capacity=VERTEX_THRESHOLD, dims=dims)

        def run_filter(pcs):
            # Recurse
            filter(pcs['path'], io.related_file_path(pcs['path'], postfix="_filtered"), 
                        standard_deviation=standard_deviation, 
                        meank=meank, 
                        sample_radius=sample_radius, 
                        verbose=verbose,
                        max_concurrency=1)
        # Filter
        parallel_map(run_filter, [{'path': p} for p in point_cloud_submodels], max_concurrency)

        # Merge
        log.ODM_INFO("Merging %s point cloud chunks to %s" % (len(point_cloud_submodels), output_point_cloud))
        filtered_pcs = [io.related_file_path(pcs, postfix="_filtered") for pcs in point_cloud_submodels]
        #merge_ply(filtered_pcs, output_point_cloud, dims)
        fast_merge_ply(filtered_pcs, output_point_cloud)

        if os.path.exists(partsdir):
            shutil.rmtree(partsdir)
    else:
        # Process point cloud (or a point cloud submodel) in a single step
        filterArgs = {
            'inputFile': input_point_cloud,
            'outputFile': output_point_cloud,
            'stages': " ".join(filters),
            'dims': dims
        }

        cmd = ("pdal translate -i \"{inputFile}\" "
                "-o \"{outputFile}\" "
                "{stages} "
                "--writers.ply.sized_types=false "
                "--writers.ply.storage_mode='little endian' "
                "--writers.ply.dims=\"{dims}\" "
                "").format(**filterArgs)

        if 'sample' in filters:
            cmd += "--filters.sample.radius={} ".format(sample_radius)
        
        if 'outlier' in filters:
            cmd += ("--filters.outlier.method='statistical' "
                "--filters.outlier.mean_k={} "
                "--filters.outlier.multiplier={} ").format(meank, standard_deviation)  
        
        if 'range' in filters:
            # Remove points flagged as noise (ASPRS class 7, low point)
            cmd += "--filters.range.limits='Classification![7:7]' "

        system.run(cmd)

    if not os.path.exists(output_point_cloud):
        log.ODM_WARNING("{} not found, filtering has failed.".format(output_point_cloud))
Code example #4
File: multispectral.py  Project: zanmange/ODM
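# NOTE: snippet as extracted; it assumes multispectral.py's imports (os, numpy as np,
# log, compute_homography, parallel_map).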
def compute_alignment_matrices(multi_camera,
                               primary_band_name,
                               images_path,
                               s2p,
                               p2s,
                               max_concurrency=1,
                               max_samples=30):
    log.ODM_INFO("Computing band alignment")

    alignment_info = {}

    # For each secondary band
    for band in multi_camera:
        if band['name'] != primary_band_name:
            matrices = []

            def parallel_compute_homography(p):
                try:
                    if len(matrices) >= max_samples:
                        # log.ODM_INFO("Got enough samples for %s (%s)" % (band['name'], max_samples))
                        return

                    # Find good matrix candidates for alignment

                    primary_band_photo = s2p.get(p['filename'])
                    if primary_band_photo is None:
                        log.ODM_WARNING(
                            "Cannot find primary band photo for %s" %
                            p['filename'])
                        return

                    warp_matrix, dimension, algo = compute_homography(
                        os.path.join(images_path, p['filename']),
                        os.path.join(images_path, primary_band_photo.filename))

                    if warp_matrix is not None:
                        log.ODM_INFO(
                            "%s --> %s good match" %
                            (p['filename'], primary_band_photo.filename))

                        matrices.append({
                            'warp_matrix': warp_matrix,
                            'eigvals': np.linalg.eigvals(warp_matrix),
                            'dimension': dimension,
                            'algo': algo
                        })
                    else:
                        log.ODM_INFO(
                            "%s --> %s cannot be matched" %
                            (p['filename'], primary_band_photo.filename))
                except Exception as e:
                    log.ODM_WARNING("Failed to compute homography for %s: %s" %
                                    (p['filename'], str(e)))

            parallel_map(parallel_compute_homography,
                         [{'filename': p.filename} for p in band['photos']],
                         max_concurrency,
                         single_thread_fallback=False)

            # Choose winning algorithm (doesn't seem to yield improvements)
            # feat_count = 0
            # ecc_count = 0
            # for m in matrices:
            #     if m['algo'] == 'feat':
            #         feat_count += 1
            #     if m['algo'] == 'ecc':
            #         ecc_count += 1

            # algo = 'feat' if feat_count >= ecc_count else 'ecc'

            # log.ODM_INFO("Feat: %s | ECC: %s | Winner: %s" % (feat_count, ecc_count, algo))
            # matrices = [m for m in matrices if m['algo'] == algo]

            # Find the matrix that has the most common eigvals
            # among all matrices. That should be the "best" alignment.
            for m1 in matrices:
                acc = np.array([0.0, 0.0, 0.0])
                e = m1['eigvals']

                for m2 in matrices:
                    acc += abs(e - m2['eigvals'])

                m1['score'] = acc.sum()

            # Sort by score: best (lowest) first
            matrices.sort(key=lambda x: x['score'])

            if len(matrices) > 0:
                alignment_info[band['name']] = matrices[0]
                log.ODM_INFO(
                    "%s band will be aligned using warp matrix %s (score: %s)"
                    % (band['name'], matrices[0]['warp_matrix'],
                       matrices[0]['score']))
            else:
                log.ODM_WARNING(
                    "Cannot find alignment matrix for band %s. The band will likely be misaligned!"
                    % band['name'])

    return alignment_info
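
The "most common eigenvalues" selection above picks the matrix whose eigenvalues have the smallest summed L1 distance to every other candidate's; a standalone sketch with synthetic matrices:

import numpy as np

# Three candidate homographies: the first two roughly agree, the third is an outlier.
candidates = [np.eye(3), np.diag([1.1, 0.9, 1.0]), np.diag([5.0, 0.2, 1.0])]
eigs = [np.linalg.eigvals(m) for m in candidates]
# Score = summed |eigenvalue difference| against all candidates; lowest wins.
scores = [sum(np.abs(e - f).sum() for f in eigs) for e in eigs]
best = candidates[int(np.argmin(scores))]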