def download_file(file_dict, folder):
    log = Logger('Download')

    if not file_dict['name']:
        log.warning('Missing file name in folder {}'.format(folder))
        return

    if not file_dict['downloadUrl'] or file_dict['downloadUrl'].lower() == '?download':
        log.warning('Missing download URL in folder {}'.format(folder))
        return

    file_path = os.path.join(folder, file_dict['name'])

    if not os.path.isdir(folder):
        os.makedirs(folder)

    # Write file info as JSON
    with open(os.path.splitext(file_path)[0] + '.json', 'w') as json_file:
        json.dump(file_dict, json_file)

    # Skip files that exist, unless they are zero bytes, in which case remove them
    if os.path.isfile(file_path):
        if os.stat(file_path).st_size == 0:
            log.warning('Removing zero byte file {}'.format(file_path))
            os.remove(file_path)
        else:
            return

    # Download the missing file
    with open(file_path, 'w+b') as f:
        response = APIGet(file_dict['downloadUrl'], absolute=True)
        f.write(response.content)

    log.info('Downloaded missing file {}'.format(file_path))
def add_report(self, parent_node, rs_lyr, replace=False):
    log = Logger('add_html_report')
    file_path = os.path.join(os.path.dirname(self.xml_path), rs_lyr.rel_path)
    self.add_dataset(parent_node, file_path, rs_lyr, 'HTMLFile', replace)
    log.info('Report node created: {}'.format(file_path))
    return file_path
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('--jsonfile',
                        help='Path to the sync file, which speeds up working out which files need attention.',
                        default="topomover.json", type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    logg = Logger("TopoMover")
    logfile = os.path.join(os.path.dirname(__file__), "TopoMover.log")
    logg.setup(logPath=logfile, verbose=args.verbose)
    logging.getLogger("boto3").setLevel(logging.ERROR)

    try:
        topomover(args.jsonfile)
    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
def driftBiomassDensity(visitMetrics, driftInverts, driftInvertResults, sampleBiomasses):
    log = Logger("driftBiomassDensity")

    if driftInverts is None or len(driftInverts["values"]) == 0:
        visitMetrics["DriftBiomassDensity"] = None
        return

    if driftInvertResults is None or len(driftInvertResults["values"]) == 0:
        visitMetrics["DriftBiomassDensity"] = None
        return

    if sampleBiomasses is None:
        visitMetrics["DriftBiomassDensity"] = None
        return

    volumes = [s["value"]["VolumeSampled"] for s in driftInverts["values"]]
    if any(v is None for v in volumes):
        log.warning("VolumeSampled contains 'None'")

    sumVolumeSampled = np.sum([v for v in volumes if v is not None])
    sampleResult = next(i for i in driftInvertResults["values"])
    sumSampleBiomass = np.sum([s["value"]["DryMassGrams"] / sampleResult["value"]["PortionOfSampleSorted"]
                               for s in sampleBiomasses["values"]])

    visitMetrics["DriftBiomassDensity"] = None
    if sumVolumeSampled > 0:
        visitMetrics["DriftBiomassDensity"] = sumSampleBiomass / sumVolumeSampled
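# Hedged worked example of the density calculation above, with synthetic
# numbers (not real CHaMP data): two samples of 0.5 g dry mass, each with
# 50% of the sample sorted, caught in 4 m^3 of sampled water.
sum_sample_biomass = 0.5 / 0.5 + 0.5 / 0.5   # 2.0 g; dry mass scaled up by the sorted portion
sum_volume_sampled = 4.0                     # m^3
print(sum_sample_biomass / sum_volume_sampled)  # -> 0.5 g/m^3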
def move_measurements(old_folder, new_folder):
    log = Logger('Move Measurements')
    log.setup(logPath=os.path.join(new_folder, datetime.now().strftime("%Y%m%d-%H%M%S") + 'move_measurements.log'))

    # Walk the directory tree and build a list of measurement files to move
    measurements = []
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        for file in filenames:
            measurements.append(os.path.join(dirpath, file))

    log.info('{} measurement files to move'.format(len(measurements)))

    for meas in measurements:
        new_path = os.path.join(os.path.dirname(meas.replace(old_folder, new_folder)), 'AuxMeasurements', os.path.basename(meas))
        if not os.path.isdir(os.path.dirname(new_path)):
            os.makedirs(os.path.dirname(new_path))
        os.rename(meas, new_path)
        log.info('Moving {} to {}'.format(meas, new_path))

    # Walk the directory tree again and collect any directories left empty
    listOfEmptyDirs = []
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    print(len(listOfEmptyDirs), 'empty folders')
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
def validate(topoPath, xmlfile, visitID):
    """
    Validate CHaMP topo data in flat folder structure
    :param topoPath: Full path to topo data (i.e. GISLayers)
    :return: 0 for success
             1 for all code failures or unhandled problems
             2 for data issues
    """
    returnValue = 0
    log = Logger("Validation")

    survey = CHaMPSurvey()
    survey.load_topo_project(topoPath, visitID)
    validationResults = survey.validate()

    stats = {
        "errors": 0,
        "warnings": 0,
        "nottested": 0,
        "status": Status.PASS,
        "layers": {}
    }

    for datasetName, datasetResults in validationResults.items():
        layerstatus = Status.PASS
        for result in datasetResults:
            log.info("[{0:{4}<{5}}] [{1}] [{2}] {3}".format(result["Status"], datasetName, result["TestName"], result["Message"], " ", 10))
            if result["Status"] == "Error":  # or result["Status"] == "NotTested":
                stats["errors"] += 1
                stats["status"] = Status.FAIL
                layerstatus = Status.FAIL
                returnValue = 2
            elif result["Status"] == "Warning":
                stats["warnings"] += 1
            elif result["Status"] == "NotTested":
                stats["nottested"] += 1
        stats['layers'][datasetName] = layerstatus

    if len(validationResults) == 0:
        log.error("No layers found to validate")
        stats["errors"] += 1
        stats["status"] = Status.FAIL
        returnValue = 2

    # The last message is what gets picked up so let's be clever:
    if returnValue == 2:
        log.error("Validation Failed")
    else:
        log.error("Validation Passed")

    writeMetricsToXML(validationResults, stats, xmlfile)

    return returnValue
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('siteid', help='the id of the site to use (no spaces)', type=str)
    parser.add_argument('outputfolder', help='Output folder')
    parser.add_argument('--logfile', help='output log file.', default="", type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    # Make sure the output folder exists
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    topoDataFolder = os.path.join(args.outputfolder, "inputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)
    if not os.path.isdir(topoDataFolder):
        os.makedirs(topoDataFolder)

    # Initiate the log file
    if args.logfile == "":
        logfile = os.path.join(resultsFolder, "siteproperties.log")
    else:
        logfile = args.logfile
    logg = Logger("SiteProperties")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:
        sitePropsGenerator(args.siteid, resultsFolder, topoDataFolder, args.verbose)
    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='the id of the visit to use (no spaces)', type=str)
    parser.add_argument('outputfolder', help='Output folder')
    parser.add_argument('--datafolder', help='(optional) Top level folder containing TopoMetrics Riverscapes projects', type=str)
    parser.add_argument('--logfile', help='output log file.', default="", type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    # Make sure the output folder exists
    resultsFolder = os.path.join(args.outputfolder, "outputs")

    # Initiate the log file
    if args.logfile == "":
        logfile = os.path.join(resultsFolder, "cad_export.log")
    else:
        logfile = args.logfile
    logg = Logger("CADExport")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:
        # Make some folders if we need to:
        if not os.path.isdir(args.outputfolder):
            os.makedirs(args.outputfolder)
        if not os.path.isdir(resultsFolder):
            os.makedirs(resultsFolder)

        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        projectxml = os.path.join(projectFolder, "project.rs.xml")
        finalResult = export_cad_files(projectxml, resultsFolder)

    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('workbench', help='Workbench database path', type=argparse.FileType('r'))
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    args = parser.parse_args()

    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)

    # Initiate the log file
    logg = Logger("Measurement Downloader")
    logfile = os.path.join(args.outputfolder, "measurement_downloader.log")
    logg.setup(logPath=logfile, verbose=False)

    try:
        metric_downloader(args.workbench.name, args.outputfolder)
    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(str(e))
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(str(e))
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
def getAllVisits(siteID):
    log = Logger('Visits')
    log.info("Getting all visits for site: {}".format(siteID))

    mangledSiteID = re.sub(r'[\s_-]', '', siteID)
    siteData = APIGet('sites/{}'.format(mangledSiteID))

    if 'visits' not in siteData or len(siteData['visits']) == 0:
        raise MissingException("No visits found for site `{}`.".format(siteID))

    return [visit for visit in siteData['visits'] if visit['sampleDate'] is not None]
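# Hedged illustration of the site-ID mangling above (the site ID is made up):
# whitespace, underscores and hyphens are stripped before the API call.
import re
print(re.sub(r'[\s_-]', '', 'CBW05583-028 079'))  # -> 'CBW05583028079'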
def add_metadata(self, valdict, node=None):
    log = Logger('add_metadata')
    metadata_element = node.find('MetaData') if node is not None else self.XMLBuilder.find('MetaData')
    for mkey, mval in valdict.items():
        if metadata_element is None:
            if node is not None:
                metadata_element = self.XMLBuilder.add_sub_element(node, "MetaData")
            else:
                metadata_element = self.XMLBuilder.add_sub_element(self.XMLBuilder.root, "MetaData")

        found = metadata_element.findall('Meta[@name="{}"]'.format(mkey))

        # Only one key-value pair is allowed with the same name. This cleans up any stragglers
        if len(found) > 0:
            for f in found:
                metadata_element.remove(f)

        # Note: we don't do a replace=False here because that only verifies the id attribute and we're
        # using 'name' for uniqueness
        self.XMLBuilder.add_sub_element(metadata_element, "Meta", mval, {"name": mkey})

    self.XMLBuilder.write()
def champ_topo_checker(workbench, folder):
    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_files.log'))

    dbCon = sqlite3.connect(workbench)
    dbCurs = dbCon.cursor()
    dbCurs.execute('SELECT WatershedName, VisitYear, SiteName, VisitID'
                   ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)'
                   ' ORDER BY VisitYear, WatershedName')

    for row in dbCurs.fetchall():
        watershed = row[0]
        visit_year = row[1]
        site = row[2]
        visitID = row[3]

        visit_path = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site.replace(' ', ''), 'VISIT_{}'.format(visitID))
        log.info('Processing {}'.format(visit_path))

        if not os.path.isdir(visit_path):
            os.makedirs(visit_path)

        try:
            visit_data = APIGet('visits/{}'.format(visitID))

            # Write visit information to json file
            with open(os.path.join(visit_path, 'visit_info.json'), 'w') as json_file:
                json.dump(visit_data, json_file)

            # Loop over the two lists of folders per visit: field folders and visit folders
            for api_key, local_folder in {'fieldFolders': 'Field Folders', 'folders': 'Visit Folders'}.items():
                if api_key in visit_data and isinstance(visit_data[api_key], list):
                    for folder_name in visit_data[api_key]:
                        field_folder_path = os.path.join(visit_path, local_folder, folder_name['name'])
                        field_folder_data = APIGet(folder_name['url'], True)
                        if isinstance(field_folder_data, dict) and 'files' in field_folder_data:
                            [download_file(file_dict, field_folder_path) for file_dict in field_folder_data['files']]

            # Get all the miscellaneous files for the visit
            [download_file(file_dict, os.path.join(visit_path, 'Files')) for file_dict in visit_data['files']]

        except Exception as e:
            log.error('Error for visit {}: {}'.format(visitID, e))

    log.info('Process Complete')
def myMainMethod(topoDataFolder, xmlfile, visitID):
    """
    :param topoDataFolder:
    :param xmlfile:
    :param visitID:
    :return:
    """
    log = Logger("myMainMethod")
    # dothingA()
    log.info("I did thing A")
    # dothingB()
    log.info("I did thing B")
    # Write XML()
    log.info("I wrote my XML file")
    # writelogs()
    log.info("I wrote my log files")
def runAuxMetrics(xmlfile, outputDirectory, visit_id):
    log = Logger("Validation")

    # Make a big object we can pass around
    try:
        visit = APIGet("visits/{}".format(visit_id))
    except MissingException:
        raise MissingException("Visit Not Found in API")
def add_project_raster(self, parent_node, rs_lyr, replace=False):
    log = Logger('add_project_raster')
    file_path = os.path.join(os.path.dirname(self.xml_path), rs_lyr.rel_path)
    self.add_dataset(parent_node, file_path, rs_lyr, 'Raster', replace)
    return file_path
def buildManualFile(self, layerFileName, bMandatory):
    """
    Build a file path using manual layer file naming
    :param layerFileName:
    :param bMandatory:
    :return:
    """
    path = ""
    log = Logger("buildManualFile")
    try:
        match = next(file for file in os.listdir(self.directory) if file.lower() == layerFileName.lower())
        path = os.path.join(self.directory, match)
    except Exception:
        log.warning("The file called '{0}' does not exist in directory: {1}".format(layerFileName, self.directory))

    # Return the matched path (empty string if no match was found)
    return path
def test_getAbsInsensitivePath(self):
    from lib.util import getAbsInsensitivePath
    from lib.loghelper import Logger
    log = Logger("FakeLogger")
    base = os.path.dirname(__file__)

    testpaths = [
        {"in": os.path.join(base), "out": os.path.join(base)},
        {"in": os.path.join(base, "../tools/topometrics"), "out": os.path.join(base, "../tools/topometrics")},
        {"in": os.path.join(base, "../tools/topometrics/topometrics.py"), "out": os.path.join(base, "../tools/topometrics/topometrics.py")},
        {"in": os.path.join(base, "../TOOLS/topoMetrics"), "out": os.path.join(base, "../tools/topometrics")},
        {"in": os.path.join(base, "../tools\\topoMetrics"), "out": os.path.join(base, "../tools/topometrics")},
    ]

    # Test the normal case (we're catching warnings too)
    for testpath in testpaths:
        with mock.patch('lib.loghelper.Logger.warning') as ctx:
            result = getAbsInsensitivePath(testpath['in'])

            # Make sure we get back the path we expect
            self.assertEqual(result, testpath['out'])

            # Make sure we get back the right number of warnings
            if testpath['in'] != testpath['out']:
                self.assertEqual(ctx.call_count, 1)
            else:
                self.assertEqual(ctx.call_count, 0)

    # Test the file-not-found case where it throws a MissingException
    brokenpath = os.path.join(base, "../tools/NOTTHERE/thing.dxf")
    with self.assertRaises(MissingException):
        getAbsInsensitivePath(brokenpath)

    # Now test where we don't care if the file is absent
    br_result = getAbsInsensitivePath(brokenpath, ignoreAbsent=True)
    self.assertEqual(br_result, brokenpath)

    # Test the empty case
    broken2 = ''
    with self.assertRaises(IOError):
        getAbsInsensitivePath(broken2)
def calculate(apiData):
    """
    Calculate riparian structure metrics
    :param apiData: dictionary of API data. Key is API call name. Value is API data
    :return: metrics dictionary
    """
    raise Exception('TODO: Code abandoned after it was determined that this was not needed.')

    log = Logger('riparianCoverMetrics')
    log.info("Running RiparianCoverMetrics")

    # Retrieve the riparian structure API data
    riparianVals = [val['value'] for val in apiData['RiparianStructure']['values']]

    # calculate metrics
    return _calc(riparianVals)
def visitTopoAuxMetrics(visitID, metricXMLPath):
    log = Logger('Metrics')
    log.info("Topo aux metrics for visit {0}".format(visitID))

    # Make all the API calls and return a dictionary of API call name keyed to data
    apiData = downloadAPIData(visitID)

    # Dictionary to hold the metric values
    visitMetrics = {}

    metric_uc = UndercutMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Undercut', metric_uc.metrics)

    metrics_su = SubstrateMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Substrate', metrics_su.metrics)

    metrics_si = SidechannelMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'SideChannel', metrics_si.metrics)

    metrics_fi = FishcoverMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'FishCover', metrics_fi.metrics)

    metrics_wo = LargeWoodMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'LargeWood', metrics_wo.metrics)

    # Metric calculation complete. Write the topometrics to the XML file
    writeMetricsToXML(visitMetrics, visitID, '', metricXMLPath, 'TopoAuxMetrics', __version__)

    log.info("Metric calculation complete for visit {0}".format(visitID))
    return visitMetrics
def safe_makedirs(dir_create_path):
    """Safely, recursively make a directory

    Arguments:
        dir_create_path {str} -- path of the directory to create
    """
    log = Logger("MakeDir")

    # Safety check on path lengths
    if len(dir_create_path) < 5 or len(dir_create_path.split('/')) <= 2:
        raise Exception('Invalid path: {}'.format(dir_create_path))

    if os.path.exists(dir_create_path) and os.path.isfile(dir_create_path):
        raise Exception('Can\'t create directory if there is a file of the same name: {}'.format(dir_create_path))

    if not os.path.exists(dir_create_path):
        try:
            log.info('Folder not found. Creating: {}'.format(dir_create_path))
            os.makedirs(dir_create_path)
        except Exception as e:
            # Possible that something else made the folder while we were trying
            if not os.path.exists(dir_create_path):
                log.error('Could not create folder: {}'.format(dir_create_path))
                raise e
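# Minimal usage sketch for safe_makedirs, under a POSIX-style throwaway temp
# root so the path-length safety check above passes; a repeated call on an
# existing directory is deliberately a no-op rather than an error.
import os
import tempfile
demo_root = tempfile.mkdtemp()
safe_makedirs(os.path.join(demo_root, 'champ', '2024', 'outputs'))
safe_makedirs(os.path.join(demo_root, 'champ', '2024', 'outputs'))  # already exists: no error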
def dryWidth(xs, rivershapeWithDonuts):
    """
    :param xs: shapely cross section object
    :param rivershapeWithDonuts: Polygon with non-qualifying donuts retained
    :return:
    """
    log = Logger("dryWidth")

    # Get all intersects of this cross section with the rivershape
    try:
        intersects = xs.intersection(rivershapeWithDonuts.buffer(0))  # KMW: buffer(0) clears up invalid geoms
    except TopologicalError as e:
        log.error(str(e))
        raise DataException("Could not perform intersection on `rivershapeWithDonuts`. Look for small, invalid islands as a possible cause.")

    # The intersect may be one object (LineString) or many. We have to handle both cases
    if intersects.type == "LineString":
        intersects = MultiLineString([intersects])
    elif intersects.type == "Point":
        return 0

    return sum([intersect.length for intersect in intersects])
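# Hedged example of the intersection handling above (assumes shapely 1.x, as
# used by dryWidth): a cross section spanning a channel polygon with one
# island "donut" yields a MultiLineString whose pieces sum to the wet width.
from shapely.geometry import LineString, Polygon
channel = Polygon([(0, 0), (10, 0), (10, 4), (0, 4)],
                  holes=[[(4, 1), (6, 1), (6, 3), (4, 3)]])
xs_demo = LineString([(-1, 2), (11, 2)])
print(dryWidth(xs_demo, channel))  # -> 8.0 (the 2 m island gap is excluded)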
def add_project_pdf(self, parent_node, rs_lyr, replace=False, att_filter=None):
    log = Logger('add_project_pdf')
    file_path = os.path.join(os.path.dirname(self.xml_path), rs_lyr.rel_path)
    self.add_dataset(parent_node, file_path, rs_lyr, 'PDF', replace)
    return file_path
def BatchRun(workbench, topoData, outputDir):
    dbCon = sqlite3.connect(workbench)
    dbCurs = dbCon.cursor()
    # dbCurs.execute('SELECT VisitID, WatershedName, VisitYear, SiteName FROM vwMainVisitList WHERE (VisitID IN ({0}))'.format(','.join(map(lambda x: str(x), jdAux))))
    # for row in dbCurs.fetchall():

    log = Logger('Topo Metrics')
    log.setup(logPath=os.path.join(outputDir, "topo_metrics.log"), verbose=False)

    projects = getTopoProjects(topoData)
    print(len(projects), 'topo projects found in', topoData)

    rootOutput = os.path.join(outputDir, 'YankeeFork')
    print('Outputting results to', rootOutput)

    for project in projects:
        print(project)
        # if project[0] == 9028 or project[0] == 9027 or project[0] == 9023 or project[0] == 9022:
        #     continue
        outputFolder = project[3].replace(topoData, outputDir)
        if not os.path.isdir(outputFolder):
            os.makedirs(outputFolder)

        # Generate a Channel Units JSON file using the ShapeFile as the truth
        jsonFilePath = os.path.join(outputFolder, 'channel_units.json')
        createChannelUnitsJSON(project[3], project[0], jsonFilePath)

        # Calculate topo metrics
        visitTopoMetrics(project[0], os.path.join(outputFolder, 'topo_metrics.xml'), project[3], jsonFilePath, None, dUnitDefs)

    print(projects)
def topomover(jsonfile):
    log = Logger("TopoMover")

    visitsraw = APIGet('visits')
    visitsreorg = {v['id']: v for v in visitsraw}
    visitids = [v['id'] for v in visitsraw]
    visitids.sort()

    # Load the inventory
    inventory = {}
    if os.path.isfile(jsonfile):
        try:
            with open(jsonfile, "r") as f:
                inventory = json.load(f)
        except Exception:
            pass
def process_duplicate_folder(with_spaces, no_spaces):
    log = Logger('Duplicate')
    movers = []
    for root, dirs, files in os.walk(with_spaces):
        for name in files:
            old_path = os.path.join(root, name)
            new_path = old_path.replace(with_spaces, no_spaces)

            # Simply delete the file if it is zero bytes
            if os.stat(old_path).st_size == 0:
                log.info('Deleting zero byte file {}'.format(old_path))
                os.remove(old_path)
                continue

            if not os.path.isdir(os.path.dirname(new_path)):
                os.makedirs(os.path.dirname(new_path))

            if os.path.isfile(new_path):
                os.remove(old_path)
            else:
                print('Moving file {}'.format(old_path))
                os.rename(old_path, new_path)
def exportAsCSV(feats, outCSVfile):
    log = Logger("CSVExport")
    log.info("Beginning CSV Export")

    with open(outCSVfile, "w", newline="") as csvfile:
        csvWriter = csv.writer(csvfile)
        # fieldsGIS = ("POINT_NUMBER", "SHAPE@Y", "SHAPE@X", "SHAPE@Z", "DESCRIPTION")
        csvWriter.writerow(("PNTNO", "Y", "X", "ELEV", "DESC"))
        for feat in feats:
            # Do some checking on mandatory fields first
            pnfield = getfield(feat, ["POINT_NUMB", "Point_Numb", "numb", "Number", "Point", "points", "p", "Point_Id", "PointId", "POINT_ID", "POINTID", "PointNumbe", "Point_id", "Name", "FID", "OBJECTID"])
            cfield = getfield(feat, ["Code", "CODE"])
            row = (feat['fields'][pnfield], feat['geometry'].x, feat['geometry'].y, feat['geometry'].z, feat['fields'][cfield])
            csvWriter.writerow(row)

    log.info("CSV Export complete")
    return outCSVfile
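# Hedged sketch of the `feats` records exportAsCSV expects, inferred from the
# field lookups above (the real loader may attach more fields than these):
from shapely.geometry import Point
demo_feats = [
    {'fields': {'POINT_NUMB': 1, 'Code': 'tp'},
     'geometry': Point(1000.0, 2000.0, 98.5)},
]
# exportAsCSV(demo_feats, 'topo_points.csv')  # writes PNTNO,Y,X,ELEV,DESC rows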
def champ_topo_checker(workbench, folder):
    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_folder_check.log'))

    # # Loop over site names organized by field season and watershed
    # dbCon = sqlite3.connect(workbench)
    # dbCurs = dbCon.cursor()
    # dbCurs.execute('SELECT WatershedName, VisitYear, SiteName' +
    #                ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)' +
    #                ' GROUP BY WatershedName, VisitYear, SiteName' +
    #                ' ORDER BY VisitYear, WatershedName, SiteName')
    #
    # for row in dbCurs.fetchall():
    #
    #     watershed = row[0]
    #     visit_year = row[1]
    #     site = row[2]
    #     # visitID = row[3]
    #
    #     visit_path1 = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site)
    #     visit_path2 = visit_path1.replace(' ', '')
    #     if ' ' in site and os.path.isdir(visit_path1) and os.path.isdir(visit_path2):
    #         try:
    #             process_duplicate_folder(visit_path1, visit_path2)
    #         except Exception as e:
    #             log.error('Error processing {}'.format(visit_path1))

    # Iterate over the directory tree and collect the empty directories
    listOfEmptyDirs = []
    for (dirpath, dirnames, filenames) in os.walk(folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    print(len(listOfEmptyDirs), 'empty folders')
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
def downloadExtractParseVisits(visits, outputFolder):
    log = Logger('Downloading')
    log.info("Downloading all visits from the API")

    projects = []
    for visit in visits:
        try:
            extractpath = os.path.join(outputFolder, 'VISIT_{}'.format(visit))
            projpath = os.path.join(extractpath, 'project.rs.xml')
            downloadUnzipTopo(visit, extractpath)

            proj = TopoProject(extractpath)
            if proj.isrsproject:
                projects.append({"project": proj, "visit": visit})
            else:
                log.error("File not found: {}".format(projpath))
                raise DataException("Missing Project File")

        # Just move on if something fails
        except Exception:
            pass
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--datafolder', help='(optional) Top level folder containing TopoMetrics Riverscapes projects', type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    # Make sure the output folder exists
    resultsFolder = os.path.join(args.outputfolder, "outputs")

    # Initiate the log file
    log = Logger("Program")
    logfile = os.path.join(resultsFolder, "bankfull_metrics.log")
    xmlfile = os.path.join(resultsFolder, "bankfull_metrics.xml")
    log.setup(logPath=logfile, verbose=args.verbose)

    try:
        # Make some folders if we need to:
        if not os.path.isdir(args.outputfolder):
            os.makedirs(args.outputfolder)
        if not os.path.isdir(resultsFolder):
            os.makedirs(resultsFolder)

        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        from lib.topoproject import TopoProject
        topo_project = TopoProject(os.path.join(projectFolder, "project.rs.xml"))

        tree = ET.parse(os.path.join(projectFolder, "project.rs.xml"))
        root = tree.getroot()
        visitid = root.findtext("./MetaData/Meta[@name='Visit']")
        if visitid is None:
            visitid = root.findtext("./MetaData/Meta[@name='VisitID']")

        finalResult = bankfull_metrics(topo_project.getpath("DEM"), topo_project.getpath("DetrendedDEM"), topo_project.getpath("Topo_Points"))
        write_bfmetrics_xml(finalResult, visitid, xmlfile)

        sys.exit(0)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        log.error(str(e))
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        log.error(str(e))
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
def BankfullMetrics(dem, detrended_dem, shp_points):
    """
    :param dem: path to the DEM raster
    :param detrended_dem: path to the detrended DEM raster
    :param shp_points: path to the topo points ShapeFile
    :return:
    """
    log = Logger("Bankfull Metrics")

    # 1. find the average elevation of crew bankfull points in the detrended DEM.
    gdf_topo_points = geopandas.GeoDataFrame().from_file(shp_points)

    gdf_bf_points = None
    if 'Code' in gdf_topo_points:
        gdf_bf_points = gdf_topo_points[gdf_topo_points["Code"] == 'bf']
    else:
        gdf_bf_points = gdf_topo_points[gdf_topo_points["code"] == 'bf']
    log.info("Loaded BF points")

    with rasterio.open(detrended_dem) as rio_detrended:
        # Sample the detrended DEM at each BF point, filtering out points
        # not within the DetrendedDEM data extent (nodata)
        bf_elevations = [v[0] for v in rio_detrended.sample(
            zip([Point(p).x for p in gdf_bf_points.geometry],
                [Point(p).y for p in gdf_bf_points.geometry]))
            if v[0] != rio_detrended.nodata]
        detrended_band = rio_detrended.read(1)

        if len(bf_elevations) == 0:
            log.error("No valid bf elevation points found.")
        else:
            log.info("Sampled {} valid BF point elevations from the DetrendedDEM".format(str(len(bf_elevations))))

        with rasterio.open(dem) as rio_dem:
            dem_band = rio_dem.read(1)

            # enforce orthogonal rasters
            dem_pad_top = int((rio_detrended.bounds.top - rio_dem.bounds.top) / 0.1) if rio_detrended.bounds.top > rio_dem.bounds.top else 0
            dem_pad_bottom = int((rio_dem.bounds.bottom - rio_detrended.bounds.bottom) / 0.1) if rio_dem.bounds.bottom > rio_detrended.bounds.bottom else 0
            dem_pad_right = int((rio_detrended.bounds.right - rio_dem.bounds.right) / 0.1) if rio_detrended.bounds.right > rio_dem.bounds.right else 0
            dem_pad_left = int((rio_dem.bounds.left - rio_detrended.bounds.left) / 0.1) if rio_dem.bounds.left > rio_detrended.bounds.left else 0

            det_pad_top = int((rio_dem.bounds.top - rio_detrended.bounds.top) / 0.1) if rio_detrended.bounds.top < rio_dem.bounds.top else 0
            det_pad_bottom = int((rio_detrended.bounds.bottom - rio_dem.bounds.bottom) / 0.1) if rio_dem.bounds.bottom < rio_detrended.bounds.bottom else 0
            det_pad_right = int((rio_dem.bounds.right - rio_detrended.bounds.right) / 0.1) if rio_detrended.bounds.right < rio_dem.bounds.right else 0
            det_pad_left = int((rio_detrended.bounds.left - rio_dem.bounds.left) / 0.1) if rio_dem.bounds.left < rio_detrended.bounds.left else 0

            np_detrended_ortho = np.pad(detrended_band, ((det_pad_top, det_pad_bottom), (det_pad_left, det_pad_right)), mode="constant", constant_values=np.nan)
            np_dem_ortho = np.pad(dem_band, ((dem_pad_top, dem_pad_bottom), (dem_pad_left, dem_pad_right)), mode="constant", constant_values=np.nan)

            if all(v == 0 for v in [dem_pad_top, dem_pad_bottom, dem_pad_right, dem_pad_left, det_pad_top, det_pad_bottom, det_pad_right, det_pad_left]):
                log.info("DEM and DetrendedDEM have concurrent extents")
            else:
                log.warning("Non-Concurrent Rasters encountered. DEM and DetrendedDEM using padded extents")

            ma_detrended = np.ma.MaskedArray(np_detrended_ortho, np.equal(np_detrended_ortho, rio_detrended.nodata))
            ma_dem = np.ma.MaskedArray(np_dem_ortho, np.equal(np_dem_ortho, rio_dem.nodata))

            # Generate Trend Grid
            np_trendgrid = np.subtract(ma_dem, ma_detrended)
            log.info("Trend surface created")

            # Average BF elev to constant raster in detrended space
            ave_bf_det_elev = sum(bf_elevations) / float(len(bf_elevations))
            ma_bf_detrended = np.full_like(ma_detrended, ave_bf_det_elev, dtype=np.float64)
            log.info("Detrended BF surface created")

            # add trend grid to BF detrended surface
            np_bf_surface = np.add(ma_bf_detrended, np_trendgrid)
            log.info("BF elevation surface created")

            # Generate depth and volume
            np_bf_depth_raw = np.subtract(np_bf_surface, ma_dem)
            np_bf_depth = np.multiply(np.greater(np_bf_depth_raw, 0), np_bf_depth_raw)
            np_bf_volume = np.multiply(np_bf_depth, 0.1 * 0.1)
            log.info("BF Depth surface created")

            ma_bf_depth = np.ma.MaskedArray(np_bf_depth, np.equal(np_bf_depth, -0.0))  # -0.0 values were getting included in the mean calculation

            # Run ZonalStatisticsAsTable to get the metric values:
            # Sum of the bankfull depth raster values times the area of one cell is BFVol.
            # Max of the bankfull depth raster values is DepthBF_Max.
            # Average of the bankfull depth raster values is DepthBF_Avg.
            bf_volume = np.nansum(np_bf_volume)
            bf_depth_max = np.nanmax(ma_bf_depth)
            bf_depth_mean = np.nanmean(ma_bf_depth)
            log.info("BF metrics calculated")

    results = {"Volume": bf_volume,
               "Depth": {"Max": bf_depth_max,
                         "Mean": bf_depth_mean}}

    return results
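# Hedged worked example of the padding arithmetic in BankfullMetrics
# (synthetic bounds, 0.1 m cells): a DetrendedDEM whose top edge sits 0.5 m
# above the DEM's means the DEM array gets 5 extra rows of NaN padding on top.
detrended_top, dem_top = 1000.5, 1000.0
print(int((detrended_top - dem_top) / 0.1))  # -> 5 rows (dem_pad_top)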