Example #1
def run_geometrics(config_file, ref_path=None, test_path=None, output_path=None,
                   align=True, allow_test_ignore=False, save_aligned=False, save_plots=None):

    # check inputs
    if not os.path.isfile(config_file):
        raise IOError("Configuration file does not exist")

    if output_path is not None and not os.path.isdir(output_path):
        raise IOError('"output_path" not a valid folder <{}>'.format(output_path))

    # parse configuration
    config_path = os.path.dirname(config_file)

    config = geo.parse_config(config_file,
                              refpath=(ref_path or config_path),
                              testpath=(test_path or config_path))

    # Get test model information from configuration file.
    test_dsm_filename = config['INPUT.TEST']['DSMFilename']
    test_dtm_filename = config['INPUT.TEST'].get('DTMFilename', None)
    test_cls_filename = config['INPUT.TEST']['CLSFilename']
    test_conf_filename = config['INPUT.TEST'].get('CONFFilename', None)
    test_mtl_filename = config['INPUT.TEST'].get('MTLFilename', None)

    # Get reference model information from configuration file.
    ref_dsm_filename = config['INPUT.REF']['DSMFilename']
    ref_dtm_filename = config['INPUT.REF']['DTMFilename']
    ref_cls_filename = config['INPUT.REF']['CLSFilename']
    ref_mtl_filename = config['INPUT.REF'].get('MTLFilename', None)

    # Get material label names and list of material labels to ignore in evaluation.
    material_names = config['MATERIALS.REF']['MaterialNames']
    material_indices_to_ignore = config['MATERIALS.REF']['MaterialIndicesToIgnore']

    # Get image pair files
    performer_pair_file = config['INPUT.TEST'].get('ImagePairFilename', None)
    performer_pair_data_file = config['INPUT.TEST'].get('ImagePairDataFilename', None)
    performer_files_chosen_file = config['INPUT.TEST'].get('FilesChosenFilename', None)
    
    # Get plot settings from configuration file
    PLOTS_SHOW = config['PLOTS']['ShowPlots']
    PLOTS_SAVE = config['PLOTS']['SavePlots']
    if save_plots is not None:  # Command-line argument overrides the config file setting
        PLOTS_SAVE = save_plots
    PLOTS_ENABLE = PLOTS_SHOW or PLOTS_SAVE

    # default output path
    if output_path is None:
        output_path = os.path.dirname(test_dsm_filename)

    if align:
        align = config['OPTIONS']['AlignModel']
    save_aligned = config['OPTIONS']['SaveAligned'] or save_aligned

    # Determine multiprocessing usage
    use_multiprocessing = config['OPTIONS']['UseMultiprocessing']

    # Configure plotting
    basename = os.path.basename(test_dsm_filename)
    if PLOTS_ENABLE:
        plot = geo.plot(saveDir=output_path, autoSave=PLOTS_SAVE, savePrefix=basename + '_', badColor='black', showPlots=PLOTS_SHOW, dpi=900)
    else:
        plot = None

    # copy testDSM to the output path
    # this is a workaround for the "align3d" function, which currently always
    # saves new files to the same path as the testDSM
    src = test_dsm_filename
    dst = os.path.join(output_path, os.path.basename(src))
    if not os.path.isfile(dst):
        shutil.copyfile(src, dst)
    test_dsm_filename_copy = dst

    # Register test model to ground truth reference model.
    if not align:
        print('\nSKIPPING REGISTRATION')
        xyz_offset = (0.0, 0.0, 0.0)
    else:
        print('\n=====REGISTRATION====='); sys.stdout.flush()
        try:
            align3d_path = config['REGEXEPATH']['Align3DPath']
        except KeyError:
            align3d_path = None
        xyz_offset = geo.align3d(ref_dsm_filename, test_dsm_filename_copy, exec_path=align3d_path)
        print(xyz_offset)
        #xyz_offset = geo.align3d_python(ref_dsm_filename, test_dsm_filename_copy)

    # Explicitly assign a no data value to warped images to track filled pixels
    no_data_value = -9999

    # Read reference model files.
    print("\nReading reference model files...")
    ref_cls, tform = geo.imageLoad(ref_cls_filename)
    ref_dsm = geo.imageWarp(ref_dsm_filename, ref_cls_filename, noDataValue=no_data_value)
    ref_dtm = geo.imageWarp(ref_dtm_filename, ref_cls_filename, noDataValue=no_data_value)

    # Validate shape of reference files
    if ref_cls.shape != ref_dsm.shape or ref_cls.shape != ref_dtm.shape:
        print("WARNING: Reference CLS, DSM, and DTM shapes do not match; rescaling may be required.")

    if ref_mtl_filename:
        ref_mtl = geo.imageWarp(ref_mtl_filename, ref_cls_filename, interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(ref_mtl, os.path.join(output_path, basename + '_ref_mtl_reg_out'), ref_cls_filename, no_data_value)
    else:
        ref_mtl = None
        print('NO REFERENCE MTL')

    # Read test model files and apply XYZ offsets.
    print("\nReading test model files...")
    test_cls = geo.imageWarp(test_cls_filename, ref_cls_filename, xyz_offset, gdalconst.GRA_NearestNeighbour)
    test_dsm = geo.imageWarp(test_dsm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)

    if test_dtm_filename:
        test_dtm = geo.imageWarp(test_dtm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)
        if save_aligned:
            geo.arrayToGeotiff(test_dtm, os.path.join(output_path, basename + '_test_dtm_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST DTM: defaults to reference DTM')
        test_dtm = ref_dtm

    if test_conf_filename:
        test_conf = geo.imageWarp(test_conf_filename,  ref_cls_filename, xyz_offset, noDataValue=no_data_value)
        conf_viz_path = Path(str(Path(test_conf_filename).parent.absolute()),
                             Path(test_conf_filename).stem + '_VIZ.tif')
        test_conf_viz = geo.imageWarpRGB(str(conf_viz_path.absolute()), ref_cls_filename, xyz_offset)
        geo.arrayToGeotiffRGB(test_conf_viz, os.path.join(output_path, 'CONF_VIZ_aligned'), ref_cls_filename,
                              no_data_value)

        geo.arrayToGeotiff(test_conf, os.path.join(output_path, 'CONF_aligned'), ref_cls_filename,
                           no_data_value)
    else:
        test_conf = None
        print("NO TEST CONF")

    if save_aligned:
        geo.arrayToGeotiff(test_cls, os.path.join(output_path, basename + '_test_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(test_dsm, os.path.join(output_path, basename + '_test_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_cls, os.path.join(output_path, basename + '_ref_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dsm, os.path.join(output_path, basename + '_ref_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dtm, os.path.join(output_path, basename + '_ref_dtm_reg_out'), ref_cls_filename,
                           no_data_value)

    if test_mtl_filename:
        test_mtl = geo.imageWarp(test_mtl_filename, ref_cls_filename, xyz_offset,
                                 gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(test_mtl, os.path.join(output_path, basename + '_test_mtl_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST MTL')

    print("\n\n")

    # Apply registration offset only to valid data, to allow better tracking of bad data
    print("Applying offset of Z:  %f" % (xyz_offset[2]))
    test_valid_data = (test_dsm != no_data_value)
    if test_dtm_filename:
        test_valid_data &= (test_dtm != no_data_value)

    test_dsm[test_valid_data] = test_dsm[test_valid_data] + xyz_offset[2]
    if test_dtm_filename:
        test_dtm[test_valid_data] = test_dtm[test_valid_data] + xyz_offset[2]

    # Create mask for ignoring points labeled NoData in reference files.
    ref_dsm_no_data_value = no_data_value
    ref_dtm_no_data_value = no_data_value
    ref_cls_no_data_value = geo.getNoDataValue(ref_cls_filename)
    if ref_cls_no_data_value != 65:
        print("WARNING! NODATA TAG IN CLS FILE IS LIKELY INCORRECT. IT SHOULD BE 65.")
        ref_cls_no_data_value = 65
    ignore_mask = np.zeros_like(ref_cls, bool)

    # Get reference and test classifications
    ref_cls_match_sets, test_cls_match_sets = geo.getMatchValueSets(config['INPUT.REF']['CLSMatchValue'],
                                                                    config['INPUT.TEST']['CLSMatchValue'],
                                                                    np.unique(ref_cls).tolist(),
                                                                    np.unique(test_cls).tolist())
    # Optionally ignore classification boundaries (building edges)
    ignore_edges = False
    if ignore_edges:
        import scipy.ndimage as ndimage
        print("Applying ignore mask to edges of buildings...")
        for index, (ref_match_value, test_match_value) in enumerate(zip(ref_cls_match_sets, test_cls_match_sets)):
            ref_mask = np.zeros_like(ref_cls, bool)
            for v in ref_match_value:
                ref_mask[ref_cls == v] = True
            strel = ndimage.generate_binary_structure(2, 2)
            dilated_cls = ndimage.binary_dilation(ref_mask, structure=strel, iterations=3)
            eroded_cls = ndimage.binary_erosion(ref_mask, structure=strel, iterations=3)
            dilation_mask = np.bitwise_xor(ref_mask, dilated_cls)
            erosion_mask = np.bitwise_xor(ref_mask, eroded_cls)
            ref_cls[dilation_mask] = ref_cls_no_data_value
            ref_cls[erosion_mask] = ref_cls_no_data_value
        print("Finished applying ignore mask to edges of buildings...")

    # Create ignore mask
    if ref_dsm_no_data_value is not None:
        ignore_mask[ref_dsm == ref_dsm_no_data_value] = True
    if ref_dtm_no_data_value is not None:
        ignore_mask[ref_dtm == ref_dtm_no_data_value] = True
    if ref_cls_no_data_value is not None:
        ignore_mask[ref_cls == ref_cls_no_data_value] = True

    # optionally ignore test NoDataValue(s)
    if allow_test_ignore:
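        # Mode 1 ignores the test CLS NoData value; mode 2 ignores the test
        # DSM/DTM NoData values (True is treated as mode 1, since True == 1).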
        if allow_test_ignore == 1:
            test_cls_no_data_value = geo.getNoDataValue(test_cls_filename)
            if test_cls_no_data_value is not None:
                print('Ignoring test CLS NoDataValue')
                ignore_mask[test_cls == test_cls_no_data_value] = True

        elif allow_test_ignore == 2:
            test_dsm_no_data_value = no_data_value
            test_dtm_no_data_value = no_data_value
            if test_dsm_no_data_value is not None:
                print('Ignoring test DSM NoDataValue')
                ignore_mask[test_dsm == test_dsm_no_data_value] = True
            if test_dtm_filename and test_dtm_no_data_value is not None:
                print('Ignoring test DTM NoDataValue')
                ignore_mask[test_dtm == test_dtm_no_data_value] = True

        else:
            raise IOError('Unrecognized test ignore value={}'.format(allow_test_ignore))

        print("")

    # sanity check
    if np.all(ignore_mask):
        raise ValueError('All pixels are ignored')

    ##### COMMENT HERE FOR TESTING METRICS IMAGES #####
    # report "data voids"
    num_data_voids = np.sum(ignore_mask > 0)
    print('Number of data voids in ignore mask = ', num_data_voids)

    # If quantizing to voxels, then match vertical spacing to horizontal spacing.
    QUANTIZE = config['OPTIONS']['QuantizeHeight']
    if QUANTIZE:
        unit_hgt = geo.getUnitHeight(tform)
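        # unit_hgt is the vertical quantization step derived from the geotransform;
        # heights (and the NoData value) are snapped to multiples of it so the
        # vertical spacing matches the horizontal ground sample distance.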
        ref_dsm = np.round(ref_dsm / unit_hgt) * unit_hgt
        ref_dtm = np.round(ref_dtm / unit_hgt) * unit_hgt
        test_dsm = np.round(test_dsm / unit_hgt) * unit_hgt
        test_dtm = np.round(test_dtm / unit_hgt) * unit_hgt
        no_data_value = np.round(no_data_value / unit_hgt) * unit_hgt

    if PLOTS_ENABLE:
        # Make image pair plots
        plot.make_image_pair_plots(performer_pair_data_file, performer_pair_file, performer_files_chosen_file, 201,
                                   saveName="image_pair_plot")
        # Reference models can include data voids, so ignore invalid data on display
        plot.make(ref_dsm, 'Reference DSM', 111, colorbar=True, saveName="input_refDSM", badValue=no_data_value)
        plot.make(ref_dtm, 'Reference DTM', 112, colorbar=True, saveName="input_refDTM", badValue=no_data_value)
        plot.make(ref_cls, 'Reference Classification', 113,  colorbar=True, saveName="input_refClass")

        # Test models shouldn't have any invalid data,
        # so display the invalid values to highlight them,
        # unlike with the refDSM/refDTM
        plot.make(test_dsm, 'Test DSM', 151, colorbar=True, saveName="input_testDSM")
        plot.make(test_dtm, 'Test DTM', 152, colorbar=True, saveName="input_testDTM")
        plot.make(test_cls, 'Test Classification', 153, colorbar=True, saveName="input_testClass")

        plot.make(ignore_mask, 'Ignore Mask', 181, saveName="input_ignoreMask")

        # material maps
        if ref_mtl_filename and test_mtl_filename:
            plot.make(ref_mtl, 'Reference Materials', 191, colorbar=True, saveName="input_refMTL", vmin=0, vmax=13)
            plot.make(test_mtl, 'Test Materials', 192, colorbar=True, saveName="input_testMTL", vmin=0, vmax=13)

    # Run the threshold geometry metrics and report results.
    metrics = dict()

    # Run threshold geometry and relative accuracy
    threshold_geometry_results = []
    relative_accuracy_results = []
    objectwise_results = []

    if PLOTS_ENABLE:
        # Update plot prefix to include a counter so it is unique for each set of CLS values evaluated
        original_save_prefix = plot.savePrefix

    # Loop through sets of CLS match values
    for index, (ref_match_value,test_match_value) in enumerate(zip(ref_cls_match_sets, test_cls_match_sets)):
        print("Evaluating CLS values")
        print("  Reference match values: " + str(ref_match_value))
        print("  Test match values: " + str(test_match_value))

        # object masks based on CLSMatchValue(s)
        ref_mask = np.zeros_like(ref_cls, bool)
        for v in ref_match_value:
            ref_mask[ref_cls == v] = True

        test_mask = np.zeros_like(test_cls, bool)
        if len(test_match_value):
            for v in test_match_value:
                test_mask[test_cls == v] = True

        if PLOTS_ENABLE:
            plot.savePrefix = original_save_prefix + "%03d" % index + "_"
            plot.make(test_mask.astype(int), 'Test Evaluation Mask', 154, saveName="input_testMask")
            plot.make(ref_mask.astype(int), 'Reference Evaluation Mask', 114, saveName="input_refMask")

        if config['OBJECTWISE']['Enable']:
            print("\nRunning objectwise metrics...")
            merge_radius = config['OBJECTWISE']['MergeRadius']
            [result, test_ndx, ref_ndx] = geo.run_objectwise_metrics(ref_dsm, ref_dtm, ref_mask, test_dsm, test_dtm,
                                                                     test_mask, tform, ignore_mask, merge_radius,
                                                                     plot=plot, geotiff_filename=ref_dsm_filename,
                                                                     use_multiprocessing=use_multiprocessing)
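            # test_ndx and ref_ndx are per-pixel object index maps returned by the
            # objectwise matcher; test_ndx is used below to locate each detected
            # building's centroid, and both are written out as GeoTIFFs further down.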

            # Get UTM coordinates from pixel coordinates in building centroids
            print("Creating KML and CSVs...")
            import gdal, osr, simplekml, csv
            kml = simplekml.Kml()
            ds = gdal.Open(ref_dsm_filename)
            # get CRS from dataset
            crs = osr.SpatialReference()
            crs.ImportFromWkt(ds.GetProjectionRef())
            # create lat/long crs with WGS84 datum
            crsGeo = osr.SpatialReference()
            crsGeo.ImportFromEPSG(4326)  # 4326 is the EPSG id of lat/long crs
            t = osr.CoordinateTransformation(crs, crsGeo)
            # Use CORE3D objectwise
            current_class = test_match_value[0]
            with open(Path(output_path, "objectwise_numbers_class_" + str(current_class) + ".csv"), mode='w') as \
                    objectwise_csv:
                objectwise_writer = csv.writer(objectwise_csv, delimiter=',', quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)
                objectwise_writer.writerow(
                    ['Index', 'iou_2d', 'iou_3d', 'hrmse', 'zrmse', 'x_coord', 'y_coord', 'geo_x_coord',
                     'geo_y_coord', 'long', 'lat'])
                for current_object in result['objects']:
                    test_index = current_object['test_objects'][0]
                    iou_2d = current_object['threshold_geometry']['2D']['jaccardIndex']
                    iou_3d = current_object['threshold_geometry']['3D']['jaccardIndex']
                    hrmse = current_object['relative_accuracy']['hrmse']
                    zrmse = current_object['relative_accuracy']['zrmse']
                    x_coords, y_coords = np.where(test_ndx == test_index)
                    x_coord = np.average(x_coords)
                    y_coord = np.average(y_coords)
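                    # tform is assumed to follow the GDAL geotransform convention
                    # (x_origin, x_res, row_rotation, y_origin, col_rotation, y_res);
                    # the affine mapping below converts the (row, col) centroid,
                    # with y_coord as column and x_coord as row, to map coordinates.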
                    geo_x_coord = tform[0] + y_coord * tform[1] + x_coord * tform[2]
                    geo_y_coord = tform[3] + y_coord * tform[4] + x_coord * tform[5]
                    (lat, long, z) = t.TransformPoint(geo_x_coord, geo_y_coord)
                    objectwise_writer.writerow([test_index, iou_2d, iou_3d, hrmse, zrmse, x_coord, y_coord,
                                                geo_x_coord, geo_y_coord, long, lat])
                    pnt = kml.newpoint(name="Building Index: " + str(test_index),
                                       description="2D IOU: " + str(iou_2d) + ' 3D IOU: ' + str(iou_3d) + ' HRMSE: '
                                                   + str(hrmse) + ' ZRMSE: ' + str(zrmse),
                                       coords=[(lat, long)])
                kml.save(Path(output_path, "objectwise_ious_class_" + str(current_class) + ".kml"))

            # Use FFDA objectwise
            with open(Path(output_path, "objectwise_numbers_no_morphology_class_" + str(current_class) + ".csv"),
                      mode='w') as objectwise_csv:
                objectwise_writer = csv.writer(objectwise_csv, delimiter=',', quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)
                objectwise_writer.writerow(['iou', 'x_coord', 'y_coord', 'geo_x_coord',
                                            'geo_y_coord', 'long', 'lat'])
                for i in result['metrics_container_no_merge'].iou_per_gt_building.keys():
                    iou = result['metrics_container_no_merge'].iou_per_gt_building[i][0]
                    x_coord = result['metrics_container_no_merge'].iou_per_gt_building[i][1][0]
                    y_coord = result['metrics_container_no_merge'].iou_per_gt_building[i][1][1]
                    geo_x_coord = tform[0] + y_coord * tform[1] + x_coord * tform[2]
                    geo_y_coord = tform[3] + y_coord * tform[4] + x_coord * tform[5]
                    (lat, long, z) = t.TransformPoint(geo_x_coord, geo_y_coord)
                    objectwise_writer.writerow([iou, x_coord, y_coord, geo_x_coord, geo_y_coord, long, lat])
                    pnt = kml.newpoint(name="Building Index: " + str(i), description=str(iou), coords=[(lat, long)])
            kml.save(Path(output_path, "objectwise_ious_no_morphology_class_" + str(current_class) + ".kml"))
            # Result
            if ref_match_value == test_match_value:
                result['CLSValue'] = ref_match_value
            else:
                result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
            # Delete non-json dumpable metrics
            del result['metrics_container_no_merge'], result['metrics_container_merge_fp'], result[
                'metrics_container_merge_fn']
            objectwise_results.append(result)

            # Save index files to compute objectwise metrics
            obj_save_prefix = basename + "_%03d" % index + "_"
            geo.arrayToGeotiff(test_ndx, os.path.join(output_path, obj_save_prefix + '_test_ndx_objs'),
                               ref_cls_filename, no_data_value)
            geo.arrayToGeotiff(ref_ndx, os.path.join(output_path, obj_save_prefix + '_ref_ndx_objs'),
                               ref_cls_filename,
                               no_data_value)

        # Evaluate threshold geometry metrics using refDTM as the testDTM to mitigate effects of terrain modeling
        # uncertainty
        result, _, stoplight_fn, errhgt_fn = geo.run_threshold_geometry_metrics(
            ref_dsm, ref_dtm, ref_mask, test_dsm, test_dtm, test_mask, tform,
            ignore_mask, testCONF=test_conf, plot=plot)
        if ref_match_value == test_match_value:
            result['CLSValue'] = ref_match_value
        else:
            result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
        threshold_geometry_results.append(result)

        # Run the relative accuracy metrics and report results.
        # Skip relative accuracy if all of testMask or refMask is assigned as "object"
        if not ((ref_mask.size == np.count_nonzero(ref_mask)) or (test_mask.size == np.count_nonzero(test_mask))) and len(test_match_value) != 0:
            try:
                result = geo.run_relative_accuracy_metrics(ref_dsm, test_dsm, ref_mask, test_mask, ignore_mask,
                                                           geo.getUnitWidth(tform), plot=plot)
                if ref_match_value == test_match_value:
                    result['CLSValue'] = ref_match_value
                else:
                    result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
                relative_accuracy_results.append(result)
            except Exception as e:
                print(str(e))

    if PLOTS_ENABLE:
        # Reset plot prefix
        plot.savePrefix = original_save_prefix

    metrics['threshold_geometry'] = threshold_geometry_results
    metrics['relative_accuracy'] = relative_accuracy_results
    metrics['objectwise'] = objectwise_results

    if align:
        metrics['registration_offset'] = xyz_offset
        metrics['geolocation_error'] = np.linalg.norm(xyz_offset)

    # Run the terrain model metrics and report results.
    if test_dtm_filename:
        dtm_z_threshold = config['OPTIONS'].get('TerrainZErrorThreshold', 1)

        # Make a reference mask for terrain evaluation that identifies elevated objects where the underlying
        # terrain estimate is expected to be inaccurate
        dtm_cls_ignore_values = config['INPUT.REF'].get('TerrainCLSIgnoreValues', [6, 17])  # Default to building and bridge deck
        dtm_cls_ignore_values = geo.validateMatchValues(dtm_cls_ignore_values, np.unique(ref_cls).tolist())
        ref_mask_terrain_acc = np.zeros_like(ref_cls, bool)
        for v in dtm_cls_ignore_values:
            ref_mask_terrain_acc[ref_cls == v] = True

        metrics['terrain_accuracy'] = geo.run_terrain_accuracy_metrics(ref_dtm, test_dtm, ref_mask_terrain_acc,
                                                                       dtm_z_threshold, plot=plot)
    else:
        print('WARNING: No test DTM file, skipping terrain accuracy metrics')

    # Run the threshold material metrics and report results.
    if test_mtl_filename and ref_mtl is not None:
        metrics['threshold_materials'] = geo.run_material_metrics(ref_ndx, ref_mtl, test_mtl, material_names,
                                                                  material_indices_to_ignore, plot=plot)
    else:
        print('WARNING: No test MTL file or no reference material, skipping material metrics')

    fileout = os.path.join(output_path, os.path.basename(config_file) + "_metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics, fid, indent=2)
    print(json.dumps(metrics, indent=2))
    print("Metrics report: " + fileout)

    #  If displaying figures, wait for user before exiting
    if PLOTS_SHOW:
            input("Press Enter to continue...")

    # Write final metrics out
    output_folder = os.path.join(output_path, "metrics_final")
    try:
        os.mkdir(output_folder)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            print("Can't create directory, please check permissions...")
            raise

    # Run roof slope metrics
    try:
        from core3dmetrics.geometrics.ang import calculate_metrics as calculate_roof_metrics
    except ImportError:
        from ang import calculate_metrics as calculate_roof_metrics

    IOUC, IOUZ, IOUAGL, IOUMZ, orderRMS = calculate_roof_metrics(
        ref_dsm, ref_dtm, ref_cls, test_dsm, test_dtm, test_cls, tform,
        kernel_radius=3, output_path=output_path)
    files = [str(Path(output_path, filename).absolute()) for filename in os.listdir(output_path) if
             filename.startswith("Roof")]

    # Assemble the formatted summary metrics, including the roof slope outputs
    metrics_formatted = {}
    metrics_formatted["2D"] = {}
    metrics_formatted["2D"]["Precision"] = metrics["threshold_geometry"][0]['2D']['precision']
    metrics_formatted["2D"]["Recall"] = metrics["threshold_geometry"][0]['2D']['recall']
    metrics_formatted["2D"]["IOU"] = metrics["threshold_geometry"][0]['2D']['jaccardIndex']
    metrics_formatted["3D"] = {}
    metrics_formatted["3D"]["Precision"] = metrics["threshold_geometry"][0]['3D']['precision']
    metrics_formatted["3D"]["Recall"] = metrics["threshold_geometry"][0]['3D']['recall']
    metrics_formatted["3D"]["IOU"] = metrics["threshold_geometry"][0]['3D']['jaccardIndex']
    metrics_formatted["ZRMS"] = metrics['relative_accuracy'][0]['zrmse']
    metrics_formatted["HRMS"] = metrics['relative_accuracy'][0]['hrmse']
    metrics_formatted["Slope RMS"] = orderRMS
    metrics_formatted["DTM RMS"] = metrics['terrain_accuracy']['zrmse']
    metrics_formatted["DTM Completeness"] = metrics['terrain_accuracy']['completeness']
    metrics_formatted["Z IOU"] = IOUZ
    metrics_formatted["AGL IOU"] = IOUAGL
    metrics_formatted["MODEL IOU"] = IOUMZ
    metrics_formatted["X Offset"] = xyz_offset[0]
    metrics_formatted["Y Offset"] = xyz_offset[1]
    metrics_formatted["Z Offset"] = xyz_offset[2]
    metrics_formatted["P Value"] = metrics['threshold_geometry'][0]['pearson']

    fileout = os.path.join(output_folder, "metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics_formatted, fid, indent=2)
    print(json.dumps(metrics_formatted, indent=2))

    # metrics.png
    if PLOTS_ENABLE:
        cls_iou_fn = [filename for filename in files if filename.endswith("CLS_IOU.tif")][0]
        cls_z_iou_fn = [filename for filename in files if filename.endswith("CLS_Z_IOU.tif")][0]
        cls_z_slope_fn = [filename for filename in files if filename.endswith("CLS_Z_SLOPE_IOU.tif")][0]
        if test_conf_filename:
            plot.make_final_metrics_images(stoplight_fn, errhgt_fn, Path(os.path.join(output_path, 'CONF_VIZ_aligned.tif')),
                                           cls_iou_fn, cls_z_iou_fn, cls_z_slope_fn, ref_cls, output_folder)

        # inputs.png
        plot.make_final_input_images_grayscale(ref_cls, ref_dsm, ref_dtm, test_cls,
                                               test_dsm, test_dtm, output_folder)
    # textured.png
    if config['BLENDER.TEST']['OBJDirectoryFilename']:
        try:
            from CORE3D_Perspective_Imagery import generate_blender_images
            objpath = config['BLENDER.TEST']['OBJDirectoryFilename']
            gsd = config['BLENDER.TEST']['GSD']
            Zup = config['BLENDER.TEST']['+Z']
            N = config['BLENDER.TEST']['OrbitalLocations']
            e = config['BLENDER.TEST']['ElevationAngle']
            f = config['BLENDER.TEST']['FocalLength']
            r = config['BLENDER.TEST']['RadialDistance']
            output_location = generate_blender_images(objpath, gsd, Zup, N, e, f, r, output_path)
            files = [str(Path(output_path, filename).absolute()) for filename in os.listdir(output_path) if
                     filename.startswith("persp_image")]
            files.append(files[0])
            # Make metrics image
            plot.make_final_input_images_rgb(files, output_folder)
            print("Done")
        except Exception:
            print("Could not render Blender images...")
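
A minimal invocation sketch for the example above; the config filename and output folder are hypothetical placeholders, and the call assumes the module-level imports this snippet relies on (os, sys, shutil, json, numpy, gdalconst, geo) are in place.

# Hypothetical usage sketch; 'aoi.config' and './metrics_out' are placeholder paths.
run_geometrics('aoi.config',
               output_path='./metrics_out',
               align=True,
               save_aligned=False,
               save_plots=True)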
Example #2
def run_geometrics(config_file, ref_path=None, test_path=None, output_path=None,
                   align=True, allow_test_ignore=False, save_aligned=False, save_plots=None):

    # check inputs
    if not os.path.isfile(config_file):
        raise IOError("Configuration file does not exist")

    if output_path is not None and not os.path.isdir(output_path):
        raise IOError('"output_path" not a valid folder <{}>'.format(output_path))

    # parse configuration
    config_path = os.path.dirname(config_file)

    config = geo.parse_config(config_file,
                              refpath=(ref_path or config_path),
                              testpath=(test_path or config_path))

    # Get test model information from configuration file.
    test_dsm_filename = config['INPUT.TEST']['DSMFilename']
    test_dtm_filename = config['INPUT.TEST'].get('DTMFilename', None)
    test_cls_filename = config['INPUT.TEST']['CLSFilename']
    test_mtl_filename = config['INPUT.TEST'].get('MTLFilename', None)

    # Get reference model information from configuration file.
    ref_dsm_filename = config['INPUT.REF']['DSMFilename']
    ref_dtm_filename = config['INPUT.REF']['DTMFilename']
    ref_cls_filename = config['INPUT.REF']['CLSFilename']
    ref_ndx_filename = config['INPUT.REF']['NDXFilename']
    ref_mtl_filename = config['INPUT.REF'].get('MTLFilename', None)

    # Get material label names and list of material labels to ignore in evaluation.
    material_names = config['MATERIALS.REF']['MaterialNames']
    material_indices_to_ignore = config['MATERIALS.REF']['MaterialIndicesToIgnore']

    # Get image pair files
    performer_pair_file = config['INPUT.TEST'].get('ImagePairFilename', None)
    performer_pair_data_file = config['INPUT.TEST'].get('ImagePairDataFilename', None)
    performer_files_chosen_file = config['INPUT.TEST'].get('FilesChosenFilename', None)
    
    # Get plot settings from configuration file
    PLOTS_SHOW = config['PLOTS']['ShowPlots']
    PLOTS_SAVE = config['PLOTS']['SavePlots']
    if save_plots is not None:  # Command-line argument overrides the config file setting
        PLOTS_SAVE = save_plots
    PLOTS_ENABLE = PLOTS_SHOW or PLOTS_SAVE

    # default output path
    if output_path is None:
        output_path = os.path.dirname(test_dsm_filename)

    if align:
        align = config['OPTIONS']['AlignModel']
    save_aligned = config['OPTIONS']['SaveAligned'] or save_aligned

    # Configure plotting
    basename = os.path.basename(test_dsm_filename)
    if PLOTS_ENABLE:
        plot = geo.plot(saveDir=output_path, autoSave=PLOTS_SAVE, savePrefix=basename + '_', badColor='black', showPlots=PLOTS_SHOW, dpi=900)
    else:
        plot = None
        
    # copy testDSM to the output path
    # this is a workaround for the "align3d" function, which currently always
    # saves new files to the same path as the testDSM
    src = test_dsm_filename
    dst = os.path.join(output_path, os.path.basename(src))
    if not os.path.isfile(dst):
        shutil.copyfile(src, dst)
    test_dsm_filename_copy = dst

    # Register test model to ground truth reference model.
    if not align:
        print('\nSKIPPING REGISTRATION')
        xyz_offset = (0.0, 0.0, 0.0)
    else:
        print('\n=====REGISTRATION====='); sys.stdout.flush()
        try:
            align3d_path = config['REGEXEPATH']['Align3DPath']
        except KeyError:
            align3d_path = None
        xyz_offset = geo.align3d(ref_dsm_filename, test_dsm_filename_copy, exec_path=align3d_path)

    # Explicitly assign a no data value to warped images to track filled pixels
    no_data_value = -9999

    # Read reference model files.
    print("\nReading reference model files...")
    ref_cls, tform = geo.imageLoad(ref_cls_filename)
    ref_dsm = geo.imageWarp(ref_dsm_filename, ref_cls_filename, noDataValue=no_data_value)
    ref_dtm = geo.imageWarp(ref_dtm_filename, ref_cls_filename, noDataValue=no_data_value)
    ref_ndx = geo.imageWarp(ref_ndx_filename, ref_cls_filename, interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint16)
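    # ref_ndx is the per-pixel reference object index map; it is used later by the material metrics.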

    if ref_mtl_filename:
        ref_mtl = geo.imageWarp(ref_mtl_filename, ref_cls_filename, interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(ref_mtl, os.path.join(output_path, basename + '_ref_mtl_reg_out'), ref_cls_filename, no_data_value)
    else:
        ref_mtl = None
        print('NO REFERENCE MTL')

    # Read test model files and apply XYZ offsets.
    print("\nReading test model files...")
    test_cls = geo.imageWarp(test_cls_filename, ref_cls_filename, xyz_offset, gdalconst.GRA_NearestNeighbour)
    test_dsm = geo.imageWarp(test_dsm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)

    if test_dtm_filename:
        test_dtm = geo.imageWarp(test_dtm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)
        if save_aligned:
            geo.arrayToGeotiff(test_dtm, os.path.join(output_path, basename + '_test_dtm_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST DTM: defaults to reference DTM')
        test_dtm = ref_dtm

    if save_aligned:
        geo.arrayToGeotiff(test_cls, os.path.join(output_path, basename + '_test_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(test_dsm, os.path.join(output_path, basename + '_test_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_cls, os.path.join(output_path, basename + '_ref_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dsm, os.path.join(output_path, basename + '_ref_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dtm, os.path.join(output_path, basename + '_ref_dtm_reg_out'), ref_cls_filename,
                           no_data_value)

    if test_mtl_filename:
        test_mtl = geo.imageWarp(test_mtl_filename, ref_cls_filename, xyz_offset,
                                 gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(test_mtl, os.path.join(output_path, basename + '_test_mtl_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST MTL')

    print("\n\n")

    # Apply registration offset only to valid data, to allow better tracking of bad data
    test_valid_data = (test_dsm != no_data_value)
    if test_dtm_filename:
        test_valid_data &= (test_dtm != no_data_value)

    test_dsm[test_valid_data] = test_dsm[test_valid_data] + xyz_offset[2]
    if test_dtm_filename:
        test_dtm[test_valid_data] = test_dtm[test_valid_data] + xyz_offset[2]

    # Create mask for ignoring points labeled NoData in reference files.
    ref_dsm_no_data_value = no_data_value
    ref_dtm_no_data_value = no_data_value
    ref_cls_no_data_value = geo.getNoDataValue(ref_cls_filename)
    if ref_cls_no_data_value != 65:
        print("WARNING! NODATA TAG IN CLS FILE IS LIKELY INCORRECT. IT SHOULD BE 65.")
        ref_cls_no_data_value = 65
    ignore_mask = np.zeros_like(ref_cls, bool)

    if ref_dsm_no_data_value is not None:
        ignore_mask[ref_dsm == ref_dsm_no_data_value] = True
    if ref_dtm_no_data_value is not None:
        ignore_mask[ref_dtm == ref_dtm_no_data_value] = True
    if ref_cls_no_data_value is not None:
        ignore_mask[ref_cls == ref_cls_no_data_value] = True

    # optionally ignore test NoDataValue(s)
    if allow_test_ignore:

        if allow_test_ignore == 1:
            test_cls_no_data_value = geo.getNoDataValue(test_cls_filename)
            if test_cls_no_data_value is not None:
                print('Ignoring test CLS NoDataValue')
                ignore_mask[test_cls == test_cls_no_data_value] = True

        elif allow_test_ignore == 2:
            test_dsm_no_data_value = no_data_value
            test_dtm_no_data_value = no_data_value
            if test_dsm_no_data_value is not None:
                print('Ignoring test DSM NoDataValue')
                ignore_mask[test_dsm == test_dsm_no_data_value] = True
            if test_dtm_filename and test_dtm_no_data_value is not None:
                print('Ignoring test DTM NoDataValue')
                ignore_mask[test_dtm == test_dtm_no_data_value] = True

        else:
            raise IOError('Unrecognized test ignore value={}'.format(allow_test_ignore))

        print("")

    # sanity check
    if np.all(ignore_mask):
        raise ValueError('All pixels are ignored')

    # report "data voids"
    num_data_voids = np.sum(ignore_mask > 0)
    print('Number of data voids in ignore mask = ', num_data_voids)

    # If quantizing to voxels, then match vertical spacing to horizontal spacing.
    QUANTIZE = config['OPTIONS']['QuantizeHeight']
    if QUANTIZE:
        unit_hgt = geo.getUnitHeight(tform)
        ref_dsm = np.round(ref_dsm / unit_hgt) * unit_hgt
        ref_dtm = np.round(ref_dtm / unit_hgt) * unit_hgt
        test_dsm = np.round(test_dsm / unit_hgt) * unit_hgt
        test_dtm = np.round(test_dtm / unit_hgt) * unit_hgt
        no_data_value = np.round(no_data_value / unit_hgt) * unit_hgt

    if PLOTS_ENABLE:
        # Make image pair plots
        plot.make_image_pair_plots(performer_pair_data_file, performer_pair_file, performer_files_chosen_file, 201, saveName="image_pair_plot")
        # Reference models can include data voids, so ignore invalid data on display
        plot.make(ref_dsm, 'Reference DSM', 111, colorbar=True, saveName="input_refDSM", badValue=no_data_value)
        plot.make(ref_dtm, 'Reference DTM', 112, colorbar=True, saveName="input_refDTM", badValue=no_data_value)
        plot.make(ref_cls, 'Reference Classification', 113,  colorbar=True, saveName="input_refClass")

        # Test models shouldn't have any invalid data,
        # so display the invalid values to highlight them,
        # unlike with the refDSM/refDTM
        plot.make(test_dsm, 'Test DSM', 151, colorbar=True, saveName="input_testDSM")
        plot.make(test_dtm, 'Test DTM', 152, colorbar=True, saveName="input_testDTM")
        plot.make(test_cls, 'Test Classification', 153, colorbar=True, saveName="input_testClass")

        plot.make(ignore_mask, 'Ignore Mask', 181, saveName="input_ignoreMask")

        # material maps
        if ref_mtl_filename and test_mtl_filename:
            plot.make(ref_mtl, 'Reference Materials', 191, colorbar=True, saveName="input_refMTL", vmin=0, vmax=13)
            plot.make(test_mtl, 'Test Materials', 192, colorbar=True, saveName="input_testMTL", vmin=0, vmax=13)

    # Run the threshold geometry metrics and report results.
    metrics = dict()

    # Run threshold geometry and relative accuracy
    threshold_geometry_results = []
    relative_accuracy_results = []
    objectwise_results = []

    # Check that match values are valid
    ref_cls_match_sets, test_cls_match_sets = geo.getMatchValueSets(config['INPUT.REF']['CLSMatchValue'],
                                                                    config['INPUT.TEST']['CLSMatchValue'],
                                                                    np.unique(ref_cls).tolist(),
                                                                    np.unique(test_cls).tolist())

    if PLOTS_ENABLE:
        # Update plot prefix to include a counter so it is unique for each set of CLS values evaluated
        original_save_prefix = plot.savePrefix

    # Loop through sets of CLS match values
    for index, (ref_match_value,test_match_value) in enumerate(zip(ref_cls_match_sets, test_cls_match_sets)):
        print("Evaluating CLS values")
        print("  Reference match values: " + str(ref_match_value))
        print("  Test match values: " + str(test_match_value))

        # object masks based on CLSMatchValue(s)
        ref_mask = np.zeros_like(ref_cls, bool)
        for v in ref_match_value:
            ref_mask[ref_cls == v] = True

        test_mask = np.zeros_like(test_cls, bool)
        if len(test_match_value):
            for v in test_match_value:
                test_mask[test_cls == v] = True

        if PLOTS_ENABLE:
            plot.savePrefix = original_save_prefix + "%03d" % index + "_"
            plot.make(test_mask.astype(int), 'Test Evaluation Mask', 154, saveName="input_testMask")
            plot.make(ref_mask.astype(int), 'Reference Evaluation Mask', 114, saveName="input_refMask")

        if config['OBJECTWISE']['Enable']:
            try:
                print("\nRunning objectwise metrics...")
                merge_radius = config['OBJECTWISE']['MergeRadius']
                [result, test_ndx, ref_ndx] = geo.run_objectwise_metrics(ref_dsm, ref_dtm, ref_mask, test_dsm, test_dtm,
                                                                         test_mask, tform, ignore_mask, merge_radius,
                                                                         plot=plot)
                if ref_match_value == test_match_value:
                    result['CLSValue'] = ref_match_value
                else:
                    result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
                objectwise_results.append(result)
    
                # Save index files to compute objectwise metrics
                obj_save_prefix = basename + "_%03d" % index + "_"
                geo.arrayToGeotiff(test_ndx, os.path.join(output_path, obj_save_prefix + '_test_ndx_objs'),
                                   ref_cls_filename, no_data_value)
                geo.arrayToGeotiff(ref_ndx, os.path.join(output_path, obj_save_prefix + '_ref_ndx_objs'), ref_cls_filename,
                                   no_data_value)
            except Exception as e:
                print(str(e))

        # Evaluate threshold geometry metrics using refDTM as the testDTM to mitigate effects of terrain modeling
        # uncertainty
        result, _ = geo.run_threshold_geometry_metrics(ref_dsm, ref_dtm, ref_mask, test_dsm, ref_dtm, test_mask,
                                                       tform, ignore_mask, plot=plot)
        if ref_match_value == test_match_value:
            result['CLSValue'] = ref_match_value
        else:
            result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
        threshold_geometry_results.append(result)

        # Run the relative accuracy metrics and report results.
        # Skip relative accuracy if all of testMask or refMask is assigned as "object"
        if not ((ref_mask.size == np.count_nonzero(ref_mask)) or (test_mask.size == np.count_nonzero(test_mask))) and len(test_match_value) != 0:
            try:
                result = geo.run_relative_accuracy_metrics(ref_dsm, test_dsm, ref_mask, test_mask, ignore_mask,
                                                           geo.getUnitWidth(tform), plot=plot)
                if ref_match_value == test_match_value:
                    result['CLSValue'] = ref_match_value
                else:
                    result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
                relative_accuracy_results.append(result)
            except Exception as e:
                print(str(e))

    if PLOTS_ENABLE:
        # Reset plot prefix
        plot.savePrefix = original_save_prefix

    metrics['threshold_geometry'] = threshold_geometry_results
    metrics['relative_accuracy'] = relative_accuracy_results
    metrics['objectwise'] = objectwise_results

    if align:
        metrics['registration_offset'] = xyz_offset
        metrics['geolocation_error'] = np.linalg.norm(xyz_offset)

    # Run the terrain model metrics and report results.
    if test_dtm_filename:
        dtm_z_threshold = config['OPTIONS'].get('TerrainZErrorThreshold', 1)

        # Make a reference mask for terrain evaluation that identifies elevated objects where the underlying
        # terrain estimate is expected to be inaccurate
        dtm_cls_ignore_values = config['INPUT.REF'].get('TerrainCLSIgnoreValues', [6, 17])  # Default to building and bridge deck
        dtm_cls_ignore_values = geo.validateMatchValues(dtm_cls_ignore_values, np.unique(ref_cls).tolist())
        ref_mask_terrain_acc = np.zeros_like(ref_cls, bool)
        for v in dtm_cls_ignore_values:
            ref_mask_terrain_acc[ref_cls == v] = True

        metrics['terrain_accuracy'] = geo.run_terrain_accuracy_metrics(ref_dtm, test_dtm, ref_mask_terrain_acc,
                                                                       dtm_z_threshold, plot=plot)
    else:
        print('WARNING: No test DTM file, skipping terrain accuracy metrics')

    # Run the threshold material metrics and report results.
    if test_mtl_filename and ref_mtl is not None:
        metrics['threshold_materials'] = geo.run_material_metrics(ref_ndx, ref_mtl, test_mtl, material_names,
                                                                  material_indices_to_ignore, plot=plot)
    else:
        print('WARNING: No test MTL file or no reference material, skipping material metrics')

    fileout = os.path.join(output_path, os.path.basename(config_file) + "_metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics, fid, indent=2)
    print(json.dumps(metrics, indent=2))
    print("Metrics report: " + fileout)

    #  If displaying figures, wait for user before exiting
    if PLOTS_SHOW:
        input("Press Enter to continue...")
Example #3
def summarize_metrics(root_dir, teams, aois, ref_path=None, test_path=None):
    # load results
    is_config = True
    all_results = {}
    all_config = {}
    # Parse results
    for current_team in teams:
        for current_aoi in aois:
            metrics_json_filepath = None
            current_dir = Path(root_dir, current_team, current_aoi)
            for file in glob.glob(
                    os.path.join(current_dir, "*.config_metrics.json")):
                results_path = file
                metrics_json_filepath = Path(results_path)
            # metrics_json_filepath = Path(root_dir, current_team, current_aoi, "%s.config_metrics.json" % current_aoi)
            if metrics_json_filepath is not None and metrics_json_filepath.is_file():
                with open(str(metrics_json_filepath.absolute())) as json_file:
                    json_data = json.load(json_file)
                # Check offset file
                current_dir = Path(root_dir, current_team, current_aoi)
                offset_file_path = None
                for file in glob.glob(os.path.join(current_dir,
                                                   "*offsets.txt")):
                    offset_file_path = file
                    offset_file_path = Path(offset_file_path)
                # offset_file_path = Path(root_dir, current_team, "%s.offset.txt" % current_aoi)
                if offset_file_path is not None and offset_file_path.is_file():
                    with open(str(offset_file_path.absolute())) as offset_file:
                        if offset_file_path.suffix == ".json":
                            offset_data = json.load(offset_file)
                        else:
                            offset_data = {}
                            for last_line in offset_file:
                                try:
                                    offset_data["offset"] = [
                                        float(idx)
                                        for idx in last_line.split()
                                    ]
                                except ValueError:
                                    continue
                        n = {}
                        n["threshold_geometry"] = json_data[
                            "threshold_geometry"]
                        n["relative_accuracy"] = json_data["relative_accuracy"]
                        n["registration_offset"] = offset_data["offset"]
                        n["geolocation_error"] = np.linalg.norm(
                            n["registration_offset"], 2)
                        n["terrain_accuracy"] = None
                        json_data = n
                        del n, offset_data

                if "terrain_accuracy" in json_data.keys():
                    n = {}
                    n["threshold_geometry"] = {}
                    n["relative_accuracy"] = {}
                    n["objectwise"] = {}
                    classes_skipped = 0
                    for cls in range(len(json_data["threshold_geometry"])):
                        current_class = json_data["threshold_geometry"][cls][
                            'CLSValue'][0]
                        if np.isnan(json_data["threshold_geometry"][cls]['2D']
                                    ['fscore']):
                            classes_skipped = classes_skipped + 1
                            continue
                        n["threshold_geometry"].update({
                            current_class:
                            json_data["threshold_geometry"][cls]
                        })
                        n["relative_accuracy"].update({
                            current_class:
                            json_data["relative_accuracy"][cls -
                                                           classes_skipped]
                        })
                        try:
                            n["objectwise"].update(
                                {current_class: json_data["objectwise"][cls]})
                        except KeyError:
                            print('No objectwise metrics found...')
                        except IndexError:
                            print("Classification doesn't exist...")
                    n["registration_offset"] = json_data["registration_offset"]
                    n["geolocation_error"] = json_data["geolocation_error"]
                    n["terrain_accuracy"] = None
                    try:
                        n["instance_f1"] = json_data["objectwise"][cls][
                            "instance_f1"]
                        n["instance_f1_merge_fp"] = json_data["objectwise"][
                            cls]["instance_f1_merge_fp"]
                        n["instance_f1_merge_fn"] = json_data["objectwise"][
                            cls]["instance_f1_merge_fn"]
                    except KeyError:
                        n["instance_f1"] = np.nan
                        n["instance_f1_merge_fp"] = np.nan
                        n["instance_f1_merge_fn"] = np.nan
                    except IndexError:
                        n["instance_f1"] = np.nan
                        n["instance_f1_merge_fp"] = np.nan
                        n["instance_f1_merge_fn"] = np.nan
                        print("Classification doesn't exist...")

                    json_data = n
                    del n

                container = Result(current_team, current_aoi, json_data)
                if current_team not in all_results.keys():
                    all_results[current_team] = {}
                all_results[current_team].update({current_aoi: container})
            else:
                container = Result(current_team, current_aoi, "")
                all_results[current_team] = {current_aoi: container}

            # Try to find config file
            current_dir = Path(root_dir, current_team, current_aoi)
            config_path = None
            for file in glob.glob(os.path.join(current_dir, "*.config")):
                config_path = file
                config_path = Path(config_path)
            # config_path = Path(root_dir, current_team, current_aoi, current_aoi + '.config')
            if config_path.is_file():
                config = geo.parse_config(
                    str(config_path.absolute()),
                    refpath=(ref_path or str(config_path.parent)),
                    testpath=(test_path or str(config_path.parent)))
            elif Path(config_path.parent,
                      config_path.stem + ".json").is_file():
                print('Old config file, parsing via json...')
                is_config = False
                config_path = Path(config_path.parent,
                                   config_path.stem + ".json")
                with open(str(config_path.absolute())) as config_file_json:
                    config = json.load(config_file_json)

            # Flatten list in case of json/config discrepancies
            if not is_config:
                config["INPUT.REF"]["CLSMatchValue"] = [
                    item for sublist in config["INPUT.REF"]["CLSMatchValue"]
                    for item in sublist
                ]
            # Store config for each aoi
            if current_team not in all_config.keys():
                all_config[current_team] = {}
            if current_aoi not in all_config[current_team].keys():
                all_config[current_team][current_aoi] = {}
            all_config[current_team][current_aoi] = config
            all_config[current_team][current_aoi].update({'path': config_path})

    # compute averaged metrics and write out objectwise metrics
    with open('objectwise_metrics.csv', mode='w') as objectwise_file:
        csv_writer = csv.writer(objectwise_file,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_MINIMAL)
        csv_writer.writerow(
            ['Building Index', '2D IOU', '3D IOU', 'HRMSE', 'ZRMSE'])
        averaged_results = {}
        for team in all_results:
            sum_2d_completeness = {}
            sum_2d_correctness = {}
            sum_2d_jaccard_index = {}
            sum_2d_fscore = {}
            sum_3d_completeness = {}
            sum_3d_correctness = {}
            sum_3d_jaccard_index = {}
            sum_3d_fscore = {}
            sum_geolocation_error = 0
            sum_hrmse = {}
            sum_zrmse = {}
            averaged_results[team] = {}
            evaluated_classes = []
            for aoi in all_results[team]:
                sum_geolocation_error = sum_geolocation_error + all_results[
                    team][aoi].results["geolocation_error"]
                for cls in all_results[team][aoi].results[
                        "threshold_geometry"]:
                    evaluated_classes.append(cls)
                    if cls not in sum_2d_completeness.keys():
                        sum_2d_completeness[cls] = 0
                        sum_2d_correctness[cls] = 0
                        sum_2d_jaccard_index[cls] = 0
                        sum_2d_fscore[cls] = 0
                        sum_3d_completeness[cls] = 0
                        sum_3d_correctness[cls] = 0
                        sum_3d_jaccard_index[cls] = 0
                        sum_3d_fscore[cls] = 0
                        sum_zrmse[cls] = 0
                        sum_hrmse[cls] = 0
                    sum_2d_completeness[cls] = sum_2d_completeness[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['2D']['completeness']
                    sum_2d_correctness[cls] = sum_2d_correctness[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['2D']['correctness']
                    sum_2d_jaccard_index[cls] = sum_2d_jaccard_index[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['2D']['jaccardIndex']
                    sum_2d_fscore[cls] = sum_2d_fscore[cls] + all_results[team][
                        aoi].results['threshold_geometry'][cls]['2D']['fscore']
                    sum_3d_completeness[cls] = sum_3d_completeness[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['3D']['completeness']
                    sum_3d_correctness[cls] = sum_3d_correctness[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['3D']['correctness']
                    sum_3d_jaccard_index[cls] = sum_3d_jaccard_index[
                        cls] + all_results[team][aoi].results[
                            'threshold_geometry'][cls]['3D']['jaccardIndex']
                    sum_3d_fscore[cls] = sum_3d_fscore[cls] + all_results[team][
                        aoi].results['threshold_geometry'][cls]['3D']['fscore']
                    sum_hrmse[cls] = sum_hrmse[cls] + all_results[team][
                        aoi].results['relative_accuracy'][cls]["hrmse"]
                    sum_zrmse[cls] = sum_zrmse[cls] + all_results[team][
                        aoi].results['relative_accuracy'][cls]["zrmse"]
            # Average results for evaluated classes in config file
            averaged_results[team]["geolocation_error"] = np.round(
                sum_geolocation_error / len(all_results[team]), decimals=2)
            # TODO: Need to make config specific to each config file, but for now it doesn't matter
            for cls in np.unique(evaluated_classes):
                try:
                    averaged_results[team][cls] = {}
                    averaged_results[team][cls]["2d_completeness"] = np.round(
                        sum_2d_completeness[cls] /
                        evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["2d_correctness"] = np.round(
                        sum_2d_correctness[cls] / evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["2d_jaccard_index"] = np.round(
                        sum_2d_jaccard_index[cls] /
                        evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["2d_fscore"] = np.round(
                        sum_2d_fscore[cls] / evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["3d_completeness"] = np.round(
                        sum_3d_completeness[cls] /
                        evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["3d_correctness"] = np.round(
                        sum_3d_correctness[cls] / evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["3d_jaccard_index"] = np.round(
                        sum_3d_jaccard_index[cls] /
                        evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["fscore"] = np.round(
                        sum_3d_fscore[cls] / evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["hrmse"] = np.round(
                        sum_hrmse[cls] / evaluated_classes.count(cls),
                        decimals=2)
                    averaged_results[team][cls]["zrmse"] = np.round(
                        sum_zrmse[cls] / evaluated_classes.count(cls),
                        decimals=2)
                except KeyError:
                    print('Class not found, skipping...')
                    continue

    return averaged_results, all_results, all_config
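
The averaged_results value returned above is a nested dictionary keyed by team and then by class label, with a per-team geolocation_error alongside. Below is a minimal, hypothetical sketch of reading it back; the team name, class value, and numbers are placeholders rather than output from a real run.

# Hypothetical stand-in for the first value returned by the function above.
averaged_results = {
    'team_a': {
        'geolocation_error': 1.23,
        6: {'2d_jaccard_index': 0.81, '3d_jaccard_index': 0.74,
            'hrmse': 0.45, 'zrmse': 0.32},
    },
}
for team, metrics in averaged_results.items():
    print(team, 'geolocation error:', metrics['geolocation_error'])
    for cls, vals in metrics.items():
        if cls == 'geolocation_error':
            continue
        print('  class', cls, vals['2d_jaccard_index'], vals['hrmse'])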
Example #4
def summarize_metrics(root_dir, teams, aois, ref_path=None, test_path=None):
    # load results
    is_config = True
    all_results = {}
    all_config = {}
    # Parse results
    for current_team in teams:
        for current_aoi in aois:
            # Reset per AOI so an old-style json config found for one AOI does
            # not leak into the next iteration.
            is_config = True
            metrics_json_filepath = Path(
                root_dir, current_team, current_aoi,
                "%s.config_metrics.json" % current_aoi)
            if metrics_json_filepath.is_file():
                with open(str(metrics_json_filepath.absolute())) as json_file:
                    json_data = json.load(json_file)
                # Check offset file
                offset_file_path = Path(root_dir, current_team,
                                        "%s.offset.txt" % current_aoi)
                if offset_file_path.is_file():
                    with open(str(offset_file_path.absolute())) as offset_file:
                        if offset_file_path.suffix == ".json":
                            offset_data = json.load(offset_file)
                        else:
                            offset_data = offset_file.readline()
                        n = {}
                        n["threshold_geometry"] = json_data[
                            "threshold_geometry"]
                        n["relative_accuracy"] = json_data["relative_accuracy"]
                        n["registration_offset"] = offset_data["offset"]
                        n["geolocation_error"] = np.linalg.norm(
                            n["registration_offset"], 2)
                        n["terrain_accuracy"] = None
                        json_data = n
                        del n, offset_data

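                # The block below re-keys the per-class lists in threshold_geometry,
                # relative_accuracy, and objectwise by their CLS value so the
                # averaging further down can index results by class label.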
                if "terrain_accuracy" in json_data.keys():
                    n = {}
                    n["threshold_geometry"] = {}
                    n["relative_accuracy"] = {}
                    n["objectwise"] = {}
                    for cls in range(len(json_data["threshold_geometry"])):
                        current_class = json_data["threshold_geometry"][cls][
                            'CLSValue'][0]
                        n["threshold_geometry"].update({
                            current_class:
                            json_data["threshold_geometry"][cls]
                        })
                        n["relative_accuracy"].update({
                            current_class:
                            json_data["relative_accuracy"][cls]
                        })
                        try:
                            n["objectwise"].update(
                                {current_class: json_data["objectwise"][cls]})
                        except KeyError:
                            print('No objectwise metrics found...')
                    n["registration_offset"] = json_data["registration_offset"]
                    n["geolocation_error"] = json_data["geolocation_error"]
                    n["terrain_accuracy"] = None
                    json_data = n
                    del n

                container = Result(current_team, current_aoi, json_data)
                if current_team not in all_results.keys():
                    all_results[current_team] = {}
                all_results[current_team].update({current_aoi: container})
            else:
                container = Result(current_team, current_aoi, "")
                if current_team not in all_results.keys():
                    all_results[current_team] = {}
                all_results[current_team].update({current_aoi: container})

            # Try to find config file
            config_path = Path(root_dir, current_team, current_aoi,
                               current_aoi + '.config')
            if config_path.is_file():
                config = geo.parse_config(
                    str(config_path.absolute()),
                    refpath=(ref_path or str(config_path.parent)),
                    testpath=(test_path or str(config_path.parent)))
            elif Path(config_path.parent,
                      config_path.stem + ".json").is_file():
                print('Old config file, parsing via json...')
                is_config = False
                config_path = Path(config_path.parent,
                                   config_path.stem + ".json")
                with open(str(config_path.absolute())) as config_file_json:
                    config = json.load(config_file_json)

            # Flatten list in case of json/config discrepancies
            if not is_config:
                config["INPUT.REF"]["CLSMatchValue"] = [
                    item for sublist in config["INPUT.REF"]["CLSMatchValue"]
                    for item in sublist
                ]
            # Store config for each aoi
            if current_team not in all_config.keys():
                all_config[current_team] = {}
            all_config[current_team][current_aoi] = config
            all_config[current_team][current_aoi].update({'path': config_path})

    # compute averaged metrics
    averaged_results = {}
    for team in all_results:
        sum_2d_completeness = {}
        sum_2d_correctness = {}
        sum_2d_jaccard_index = {}
        sum_2d_fscore = {}
        sum_3d_completeness = {}
        sum_3d_correctness = {}
        sum_3d_jaccard_index = {}
        sum_3d_fscore = {}
        sum_geolocation_error = 0
        sum_hrmse = {}
        sum_zrmse = {}
        averaged_results[team] = {}
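        # Accumulate per-class sums over every AOI for this team; the averages
        # below divide by the total number of AOIs.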
        for aoi in all_results[team]:
            sum_geolocation_error = sum_geolocation_error + all_results[team][
                aoi].results["geolocation_error"]
            for cls in all_results[team][aoi].results["threshold_geometry"]:
                if cls not in sum_2d_completeness.keys():
                    sum_2d_completeness[cls] = 0
                    sum_2d_correctness[cls] = 0
                    sum_2d_jaccard_index[cls] = 0
                    sum_2d_fscore[cls] = 0
                    sum_3d_completeness[cls] = 0
                    sum_3d_correctness[cls] = 0
                    sum_3d_jaccard_index[cls] = 0
                    sum_3d_fscore[cls] = 0
                    sum_zrmse[cls] = 0
                    sum_hrmse[cls] = 0
                sum_2d_completeness[cls] = sum_2d_completeness[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['2D']['completeness']
                sum_2d_correctness[cls] = sum_2d_correctness[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['2D']['correctness']
                sum_2d_jaccard_index[cls] = sum_2d_jaccard_index[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['2D']['jaccardIndex']
                sum_2d_fscore[cls] = sum_2d_fscore[cls] + all_results[team][
                    aoi].results['threshold_geometry'][cls]['2D']['fscore']
                sum_3d_completeness[cls] = sum_3d_completeness[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['3D']['completeness']
                sum_3d_correctness[cls] = sum_3d_correctness[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['3D']['correctness']
                sum_3d_jaccard_index[cls] = sum_3d_jaccard_index[
                    cls] + all_results[team][aoi].results[
                        'threshold_geometry'][cls]['3D']['jaccardIndex']
                sum_3d_fscore[cls] = sum_3d_fscore[cls] + all_results[team][
                    aoi].results['threshold_geometry'][cls]['3D']['fscore']
                sum_hrmse[cls] = sum_hrmse[cls] + all_results[team][
                    aoi].results['relative_accuracy'][cls]["hrmse"]
                sum_zrmse[cls] = sum_zrmse[cls] + all_results[team][
                    aoi].results['relative_accuracy'][cls]["zrmse"]
        # Average results for evaluated classes in config file
        num_aois = len(all_results[team])
        averaged_results[team]["geolocation_error"] = np.round(
            sum_geolocation_error / num_aois, decimals=2)
        # TODO: Need to make config specific to each config file, but for now it doesn't matter
        for cls in config["INPUT.REF"]["CLSMatchValue"]:
            try:
                averaged_results[team][cls] = {}
                averaged_results[team][cls]["2d_completeness"] = np.round(
                    sum_2d_completeness[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["2d_correctness"] = np.round(
                    sum_2d_correctness[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["2d_jaccard_index"] = np.round(
                    sum_2d_jaccard_index[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["2d_fscore"] = np.round(
                    sum_2d_fscore[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["3d_completeness"] = np.round(
                    sum_3d_completeness[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["3d_correctness"] = np.round(
                    sum_3d_correctness[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["3d_jaccard_index"] = np.round(
                    sum_3d_jaccard_index[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["fscore"] = np.round(
                    sum_3d_fscore[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["hrmse"] = np.round(
                    sum_hrmse[cls] / num_aois, decimals=2)
                averaged_results[team][cls]["zrmse"] = np.round(
                    sum_zrmse[cls] / num_aois, decimals=2)
            except KeyError:
                print('Class not found, skipping...')
                continue

    return averaged_results, all_results, all_config
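
A rough usage sketch for summarize_metrics, assuming the per-team/per-AOI directory layout it expects; the root directory, team names, and AOI names below are placeholders.

# Hypothetical inputs for illustration only.
root_dir = '/data/challenge_results'
teams = ['team_a', 'team_b']
aois = ['aoi_1', 'aoi_2']

averaged, per_aoi, configs = summarize_metrics(root_dir, teams, aois)
for team in averaged:
    print(team, 'geolocation error:', averaged[team]['geolocation_error'])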
Example #5
def run_geometrics(configfile,
                   refpath=None,
                   testpath=None,
                   outputpath=None,
                   align=True):

    # check inputs
    if not os.path.isfile(configfile):
        raise IOError("Configuration file does not exist")

    if outputpath is not None and not os.path.isdir(outputpath):
        raise IOError(
            '"outputpath" not a valid folder <{}>'.format(outputpath))

    # parse configuration
    configpath = os.path.dirname(configfile)

    config = geo.parse_config(configfile,
                              refpath=(refpath or configpath),
                              testpath=(testpath or configpath))

    # Get test model information from configuration file.
    testDSMFilename = config['INPUT.TEST']['DSMFilename']
    testDTMFilename = config['INPUT.TEST'].get('DTMFilename', None)
    testCLSFilename = config['INPUT.TEST']['CLSFilename']
    testMTLFilename = config['INPUT.TEST'].get('MTLFilename', None)

    # Get reference model information from configuration file.
    refDSMFilename = config['INPUT.REF']['DSMFilename']
    refDTMFilename = config['INPUT.REF']['DTMFilename']
    refCLSFilename = config['INPUT.REF']['CLSFilename']
    refNDXFilename = config['INPUT.REF']['NDXFilename']
    refMTLFilename = config['INPUT.REF']['MTLFilename']

    # Get material label names and list of material labels to ignore in evaluation.
    materialNames = config['MATERIALS.REF']['MaterialNames']
    materialIndicesToIgnore = config['MATERIALS.REF'][
        'MaterialIndicesToIgnore']

    # default output path
    if outputpath is None:
        outputpath = os.path.dirname(testDSMFilename)

    # copy testDSM to the output path
    # this is a workaround for the "align3d" function, which currently always
    # saves new files to the same path as the testDSM
    src = testDSMFilename
    dst = os.path.join(outputpath, os.path.basename(src))
    if not os.path.isfile(dst): shutil.copyfile(src, dst)
    testDSMFilename_copy = dst

    # Register test model to ground truth reference model.
    if not align:
        print('\nSKIPPING REGISTRATION')
        xyzOffset = (0.0, 0.0, 0.0)
    else:
        print('\n=====REGISTRATION=====')
        sys.stdout.flush()
        try:
            align3d_path = config['REGEXEPATH']['Align3DPath']
        except KeyError:
            align3d_path = None
        xyzOffset = geo.align3d(refDSMFilename,
                                testDSMFilename_copy,
                                exec_path=align3d_path)

    # Read reference model files.
    print("")
    print("Reading reference model files...")
    refCLS, tform = geo.imageLoad(refCLSFilename)
    refDSM = geo.imageWarp(refDSMFilename, refCLSFilename)
    refDTM = geo.imageWarp(refDTMFilename, refCLSFilename)
    refNDX = geo.imageWarp(
        refNDXFilename,
        refCLSFilename,
        interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint16)
    refMTL = geo.imageWarp(
        refMTLFilename,
        refCLSFilename,
        interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint8)

    # Read test model files and apply XYZ offsets.
    print("Reading test model files...")
    print("")
    testCLS = geo.imageWarp(testCLSFilename, refCLSFilename, xyzOffset,
                            gdalconst.GRA_NearestNeighbour)
    testDSM = geo.imageWarp(testDSMFilename, refCLSFilename, xyzOffset)
    testDSM = testDSM + xyzOffset[2]

    if testDTMFilename:
        testDTM = geo.imageWarp(testDTMFilename, refCLSFilename, xyzOffset)
        testDTM = testDTM + xyzOffset[2]
    else:
        print('NO TEST DTM: defaults to reference DTM')
        testDTM = refDTM

    if testMTLFilename:
        testMTL = geo.imageWarp(testMTLFilename, refCLSFilename, xyzOffset,
                                gdalconst.GRA_NearestNeighbour).astype(
                                    np.uint8)

    # object masks based on CLSMatchValue(s)
    refMask = np.zeros_like(refCLS, bool)
    for v in config['INPUT.REF']['CLSMatchValue']:
        refMask[refCLS == v] = True

    testMask = np.zeros_like(testCLS, bool)
    for v in config['INPUT.TEST']['CLSMatchValue']:
        testMask[testCLS == v] = True

    # Create mask for ignoring points labeled NoData in reference files.
    refDSM_NoDataValue = geo.getNoDataValue(refDSMFilename)
    refDTM_NoDataValue = geo.getNoDataValue(refDTMFilename)
    refCLS_NoDataValue = geo.getNoDataValue(refCLSFilename)
    ignoreMask = np.zeros_like(refCLS, bool)

    if refDSM_NoDataValue is not None:
        ignoreMask[refDSM == refDSM_NoDataValue] = True
    if refDTM_NoDataValue is not None:
        ignoreMask[refDTM == refDTM_NoDataValue] = True
    if refCLS_NoDataValue is not None:
        ignoreMask[refCLS == refCLS_NoDataValue] = True

    # If quantizing to voxels, then match vertical spacing to horizontal spacing.
    QUANTIZE = config['OPTIONS']['QuantizeHeight']
    if QUANTIZE:
        unitHgt = (np.abs(tform[1]) + abs(tform[5])) / 2
        refDSM = np.round(refDSM / unitHgt) * unitHgt
        refDTM = np.round(refDTM / unitHgt) * unitHgt
        testDSM = np.round(testDSM / unitHgt) * unitHgt
        testDTM = np.round(testDTM / unitHgt) * unitHgt

    # Run the threshold geometry metrics and report results.
    metrics = geo.run_threshold_geometry_metrics(refDSM, refDTM, refMask,
                                                 testDSM, testDTM, testMask,
                                                 tform, ignoreMask)

    metrics['offset'] = xyzOffset

    fileout = os.path.join(outputpath,
                           os.path.basename(testDSMFilename) + "_metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics, fid, indent=2)
    print(json.dumps(metrics, indent=2))

    # Run the threshold material metrics and report results.
    if testMTLFilename:
        geo.run_material_metrics(refNDX, refMTL, testMTL, materialNames,
                                 materialIndicesToIgnore)
    else:
        print('WARNING: No test MTL file, skipping material metrics')
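
A minimal usage sketch for this run_geometrics variant; the configuration file and folders below are placeholders. Leaving refpath/testpath as None falls back to the configuration file's folder, and leaving outputpath as None falls back to the test DSM's folder.

# Hypothetical paths for illustration only.
run_geometrics('aoi_1.config',
               refpath='/data/aoi_1/ref',
               testpath='/data/aoi_1/test',
               outputpath='/data/aoi_1/output',
               align=True)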