def move_measurements(old_folder, new_folder):

    log = Logger('Move Measurements')
    log.setup(logPath=os.path.join(new_folder, datetime.now().strftime("%Y%m%d-%H%M%S") + '_move_measurements.log'))

    # Collect every measurement file under the old folder
    measurements = list()
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        for file in filenames:
            measurements.append(os.path.join(dirpath, file))

    log.info('{} measurement files to move'.format(len(measurements)))

    for meas in measurements:
        new_path = os.path.join(os.path.dirname(meas.replace(old_folder, new_folder)), 'AuxMeasurements', os.path.basename(meas))

        if not os.path.isdir(os.path.dirname(new_path)):
            os.makedirs(os.path.dirname(new_path))

        log.info('Moving {} to {}'.format(meas, new_path))
        os.rename(meas, new_path)

    # Create a List
    listOfEmptyDirs = list()
    # Iterate over the directory tree and check if directory is empty.
    for (dirpath, dirnames, filenames) in os.walk(old_folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    log.info('{} empty folders to remove'.format(len(listOfEmptyDirs)))
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
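
A caveat on the cleanup pass above: a single top-down walk only removes directories that were already empty, so parents that become empty once their children are deleted survive until the next run. A minimal sketch of a bottom-up variant (same imports assumed, not part of the original script):

def remove_empty_dirs(folder):
    # topdown=False yields leaf directories first, so children are gone before parents are checked
    for dirpath, dirnames, filenames in os.walk(folder, topdown=False):
        if not os.listdir(dirpath):  # re-check on disk; earlier deletions may have emptied it
            os.rmdir(dirpath)  # note: removes `folder` itself too if everything under it was empty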
Example #2
def driftBiomassDensity(visitMetrics, driftInverts, driftInvertResults, sampleBiomasses):
    log = Logger("driftBiomassDensity")
    if driftInverts is None or len(driftInverts["values"]) == 0:
        visitMetrics["DriftBiomassDensity"] = None
        return

    if driftInvertResults is None or len(driftInvertResults["values"]) == 0:
        visitMetrics["DriftBiomassDensity"] = None
        return

    if sampleBiomasses is None:
        visitMetrics["DriftBiomassDensity"] = None
        return

    volumes = [s["value"]["VolumeSampled"] for s in driftInverts["values"]]

    if any(v is None for v in volumes):
        log.warning("VolumeSampled contains 'None'")

    sumVolumeSampled = np.sum([v for v in volumes if v is not None])
    sampleResult = driftInvertResults["values"][0]
    sumSampleBiomass = np.sum([s["value"]["DryMassGrams"] / sampleResult["value"]["PortionOfSampleSorted"]
                               for s in sampleBiomasses["values"]])

    visitMetrics["DriftBiomassDensity"] = None

    if sumVolumeSampled > 0:
        visitMetrics["DriftBiomassDensity"] = sumSampleBiomass / sumVolumeSampled
Example #3
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('--jsonfile',
                        help='The sync file. Speeds the process up by tracking which files to work with.',
                        default="topomover.json",
                        type=str)
    parser.add_argument('--verbose',
                        help='Get more information in your logs.',
                        action='store_true',
                        default=False)

    logg = Logger("CADExport")
    logfile = os.path.join(os.path.dirname(__file__), "TopoMover.log")
    logg.setup(logPath=logfile, verbose=False)
    logging.getLogger("boto3").setLevel(logging.ERROR)
    args = parser.parse_args()

    try:
        topomover(args.jsonfile)

    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
Example #4
def dryWidth(xs, rivershapeWithDonuts):
    """

    :param xs: shapely cross section object
    :param rivershapeWithDonuts: Polygon with non-qualifying donuts retained
    :return:
    """
    # Get all intersects of this crosssection with the rivershape
    log = Logger("dryWidth")
    try:
        # buffer(0) clears up invalid geometries (KMW)
        intersects = xs.intersection(rivershapeWithDonuts.buffer(0))
    except TopologicalError as e:
        log.error(e)
        raise DataException("Could not perform intersection on `rivershapeWithDonuts`. "
                            "Look for small, invalid islands as a possible cause.")

    # The intersect may be one object (LineString) or many. We have to handle both cases
    if intersects.type == "LineString":
        intersects = MultiLineString([intersects])
    elif intersects.type == "Point":
        return 0

    return sum([intersect.length for intersect in intersects])
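
A quick sanity check with synthetic geometry (assuming shapely 1.x, which matches the iteration style above): a cross section spanning a 10 x 10 square should report a total intersection length of 10.

from shapely.geometry import LineString, Polygon

river = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
xs = LineString([(-1, 5), (11, 5)])  # crosses the full width of the square
print(dryWidth(xs, river))  # 10.0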
Example #5
def BatchRun(workbench, topoData, outputDir):

    # dbCon = sqlite3.connect(workbench)
    # dbCurs = dbCon.cursor()
    # dbCurs.execute('SELECT VisitID, WatershedName, VisitYear, SiteName FROM vwMainVisitList WHERE (VisitID IN ({0}))'.format(','.join(map(lambda x: str(x), jdAux))))
    # for row in dbCurs.fetchall():

    log = Logger('Topo Metrics')
    log.setup(logPath=os.path.join(outputDir, "topo_metrics.log"), verbose=False)

    projects = getTopoProjects(topoData)
    print(len(projects), 'topo projects found in', topoData)
    rootOutput = os.path.join(outputDir, 'YankeeFork')
    print('Outputting results to', rootOutput)

    for project in projects:
        print(project)

        if project[0] == 9053 or project[0] == 9020:
            continue

        outputFolder = project[3].replace(topoData, outputDir)

        if not os.path.isdir(outputFolder):
            os.makedirs(outputFolder)

        # Generate a Channel Units JSON file using the ShapeFile as the truth
        jsonFilePath = os.path.join(outputFolder, 'channel_units.json')
        createChannelUnitsJSON(project[3], project[0], jsonFilePath)

        # Calculate topo metrics
        visitTopoMetrics(project[0], os.path.join(outputFolder, 'topo_metrics.xml'), project[3], jsonFilePath, None, dUnitDefs)

    print(projects)
Example #6
def validate(topoPath, xmlfile, visitID):
    """
    Validate champ topo data in flat folder structure
    :param topoPath: Full Path to topo data (i.e. GISLayers)
    :return: 0 for success
             1 for all code failures or unhandled problems
             2 for data issues
    """

    returnValue = 0
    log = Logger("Validation")

    survey = CHaMPSurvey()
    survey.load_topo_project(topoPath, visitID)
    validationResults = survey.validate()

    stats = {
        "errors": 0,
        "warnings": 0,
        "nottested": 0,
        "status": Status.PASS,
        "layers": {}
    }
    for datasetName, datasetResults in validationResults.items():
        layerstatus = Status.PASS
        for result in datasetResults:
            log.info("[{0:{4}<{5}}] [{1}] [{2}] {3}".format(
                result["Status"], datasetName, result["TestName"],
                result["Message"], " ", 10))
            if result["Status"] == "Error":  # or result["Status"] == "NotTested":
                stats["errors"] += 1
                stats["status"] = Status.FAIL
                layerstatus = Status.FAIL
                returnValue = 2
            elif result["Status"] == "Warning":
                stats["warnings"] += 1
            elif result["Status"] == "NotTested":
                stats["nottested"] += 1

        stats['layers'][datasetName] = layerstatus

    if len(validationResults) == 0:
        log.error("No layers found to validate")
        stats["errors"] += 1
        stats["status"] = Status.FAIL
        returnValue = 2

    # The last message is what gets picked up downstream, so always emit the verdict at error level:
    if returnValue == 2:
        log.error("Validation Failed")
    else:
        log.error("Validation Passed")

    writeMetricsToXML(validationResults, stats, xmlfile)

    return returnValue
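
The loop above implies that validationResults maps each dataset name to a list of test dicts carrying Status, TestName, and Message keys. A fabricated minimal input for exercising the tallying logic:

validationResults = {
    "Thalweg": [
        {"Status": "Pass", "TestName": "FileExists", "Message": "ok"},
        {"Status": "Error", "TestName": "GeometryValid", "Message": "self-intersection"},
    ],
    "EdgeOfWater": [
        {"Status": "Warning", "TestName": "PointCount", "Message": "sparse coverage"},
    ],
}
# Feeding this through validate's loop would yield stats["errors"] == 1,
# stats["warnings"] == 1, stats["status"] == Status.FAIL and a return value of 2.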
Example #7
def APIDelete(url, absolute=False):
    """
    DELETE Wrapper around APICall
    :param url:
    :param absolute:
    :return:
    """
    log = Logger("APIDelete")
    log.info("Making Call: DELETE {}".format(url))
    return APICall(url, absolute, method=requests.delete)
Example #8
def getAllVisits(siteID):
    log = Logger('Visits')
    log.info("Getting all visits for site: {}".format(siteID))

    mangledSiteID = re.sub(r'[\s_-]', '', siteID)

    siteData = APIGet('sites/{}'.format(mangledSiteID))

    if 'visits' not in siteData or len(siteData['visits']) == 0:
        raise MissingException("No visits found for site `{}`.".format(siteID))

    return [visit for visit in siteData['visits'] if visit['sampleDate'] is not None]
Example #9
def getAbsInsensitivePath(abs_insensitive_path, ignoreAbsent=False):
    """
    Will sanitize cases and return the correct case.
    :param abs_insensitive_path:
    :param ignoreAbsent: if true this will not throw an exception and just return the path
    :return:
    """
    log = Logger("getAbsInsensitivePath")

    if len(abs_insensitive_path) == 0:
        raise IOError("Zero-length path used: getAbsInsensitivePath()")

    if os.path.sep == "/":
        pathreplaced = abs_insensitive_path.replace("\\", os.path.sep)
    else:
        pathreplaced = abs_insensitive_path.replace("/", os.path.sep)

    parts = pathreplaced.split(os.path.sep)

    improved_parts = []

    for part in parts:
        if part == ".." or part == "." or part == "":
            improved_parts.append(part)
        else:
            improved_path = os.path.sep.join(improved_parts)
            if len(improved_path) == 0:
                improved_path = os.path.sep
            try:
                found = False
                for name in os.listdir(improved_path):
                    if part.lower() == name.lower():
                        improved_parts.append(name)
                        found = True
                        break
                if not found:
                    raise OSError("Not found")
            except OSError as e:
                if not ignoreAbsent:
                    raise MissingException(
                        "Could not find case-insensitive path: {}".format(
                            abs_insensitive_path))
                else:
                    return abs_insensitive_path

    finalpath = os.path.sep.join(improved_parts)

    if abs_insensitive_path != finalpath:
        log.warning("Paths do not match: `{}`  != `{}`".format(
            abs_insensitive_path, finalpath))

    return finalpath
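
Typical use is normalizing a path recorded on a case-insensitive filesystem (e.g. Windows) before touching it on a case-sensitive one; the paths below are hypothetical:

fixed = getAbsInsensitivePath("/data/Topo/GISLayers")  # returns the path as it is cased on disk, e.g. "/data/topo/GISLayers"
safe = getAbsInsensitivePath("/data/NotThere", ignoreAbsent=True)  # returns the input unchanged instead of raising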
Example #10
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('workbench',
                        help='Workbench database path',
                        type=argparse.FileType('r'))
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    args = parser.parse_args()

    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)

    # Initiate the log file
    logg = Logger("Measurement Downloader")
    logfile = os.path.join(args.outputfolder, "measurement_downloader.log")
    logg.setup(logPath=logfile, verbose=False)

    try:

        metric_downloader(args.workbench.name, args.outputfolder)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
Example #11
def loadChannelUnitsFromAPI(vid):

    apiUnits = APIGet('visits/{}/measurements/Channel Unit'.format(vid))
    dUnits = {}

    for nodUnit in apiUnits['values']:
        value = nodUnit['value']
        nCUNumber = int(value['ChannelUnitNumber'])
        tier1 = value['Tier1']
        tier2 = value['Tier2']
        segment = value['ChannelSegmentID']

        dUnits[nCUNumber] = (tier1, tier2, segment)

    log = Logger("Channel Units")
    log.info("{0} channel units loaded from XML file".format(len(dUnits)))

    return dUnits
Example #12
def APIGet(url, absolute=False):
    """
    GET Wrapper around APICall
    :param url:
    :param absolute:
    :return:
    """
    log = Logger("APIGet")
    #log.info("Making Call: GET {}".format(url))
    return APICall(url, absolute, method=requests.get)
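
Both wrappers defer to APICall, so a relative path is resolved against the API root while absolute=True sends a full URL through untouched. Hypothetical calls:

visit = APIGet('visits/1234')  # path relative to the API base URL
payload = APIGet('https://example.com/files/topo.zip', absolute=True)  # pre-built full URL passed through as-is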
Example #13
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('siteid', help='the id of the site to use (no spaces)', type=str)
    parser.add_argument('outputfolder', help='Output folder')
    parser.add_argument('--logfile', help='Output log file path.', default="", type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)

    args = parser.parse_args()

    # Make sure the output folder exists
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    topoDataFolder = os.path.join(args.outputfolder, "inputs")

    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)
    if not os.path.isdir(topoDataFolder):
        os.makedirs(topoDataFolder)

    # Initiate the log file
    if args.logfile == "":
        logfile = os.path.join(resultsFolder, "siteproperties.log")
    else:
        logfile = args.logfile

    logg = Logger("SiteProperties")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:
        sitePropsGenerator(args.siteid, resultsFolder, topoDataFolder, args.verbose)

    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)
Example #14
def myMainMethod(topoDataFolder, xmlfile, visitID):
    """
    :param jsonFilePath:
    :param outputFolder:
    :param bVerbose:
    :return:
    """
    log = Logger("myMainMethod")

    # dothingA()
    log.info("I did thing A")
    # dothingB()
    log.info("I did thing B")
    # Write XML()
    log.info("I wrote my XML file")
    # writelogs()
    log.info("I wrote my log files")

    return 0
Example #15
    def test_getAbsInsensitivePath(self):
        from champmetrics.lib.util import getAbsInsensitivePath
        from champmetrics.lib.loghelper import Logger

        log = Logger("FakeLogger")

        base = os.path.dirname(__file__)
        testpaths = [
            {
                "in": os.path.join(base),
                "out": os.path.join(base),
            },
            {
                "in" : os.path.join(base, "../tools/topometrics"),
                "out": os.path.join(base, "../tools/topometrics"),
            },
            {
                "in": os.path.join(base, "../tools/topometrics/topometrics.py"),
                "out": os.path.join(base, "../tools/topometrics/topometrics.py"),
            },
            {
                "in": os.path.join(base, "../TOOLS/topoMetrics"),
                "out": os.path.join(base, "../tools/topometrics"),
            },
            {
                "in": os.path.join(base, "../tools\\topoMetrics"),
                "out": os.path.join(base, "../tools/topometrics"),
            },
        ]
        # Test the normal case (we're catching warnings too)
        for testpath in testpaths:
            with mock.patch('champmetrics.lib.loghelper.Logger.warning') as ctx:
                result = getAbsInsensitivePath(testpath['in'])
                # Make sure we get back the path we expect
                self.assertEqual(result, testpath['out'])

                # Make sure we get back the right number of warnings
                if testpath['in'] != testpath['out']:
                    self.assertEqual(ctx.call_count, 1)
                else:
                    self.assertEqual(ctx.call_count, 0)

        # Test the file not found case where it throws a MissingException
        brokenpath = os.path.join(base, "../tools/NOTTHERE/thing.dxf")
        with self.assertRaises(MissingException) as e:
            getAbsInsensitivePath(brokenpath)

        # Now test where we don't care
        br_result = getAbsInsensitivePath(brokenpath, ignoreAbsent=True)
        self.assertEqual(br_result, brokenpath)

        # Test the empty case
        broken2 = ''
        with self.assertRaises(IOError) as e:
            getAbsInsensitivePath(broken2)
Example #16
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--channelunitsjson', help='(optional) json file to load channel units from', type=str)
    parser.add_argument('--workbenchdb', help='(optional) sqlite db to load channel units from', type=str)
    parser.add_argument('--datafolder', help='(optional) Top level folder containing TopoMetrics Riverscapes projects', type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    # Make sure the output folders exist before the log file is written into them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    logg = Logger("Program")
    logfile = os.path.join(resultsFolder, "topo_metrics.log")
    xmlfile = os.path.join(resultsFolder, "topo_metrics.xml")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:

        projectFolder = ""
        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        dMetricsObj = visitTopoMetrics(args.visitID, xmlfile, projectFolder, args.channelunitsjson, args.workbenchdb, None)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
Example #17
def calculate(apiData):
    """
    Calculate riparian structure metrics
    :param apiData: dictionary of API data. Key is API call name. Value is API data
    :return: metrics dictionary
    """

    raise Exception(
        'TODO: Code abandoned after it was determined that this was not needed.'
    )

    log = Logger('riparianCoverMetrics')
    log.info("Running RiparianCoverMetrics")

    # Retrieve the riparian structure API data
    riparianVals = [
        val['value'] for val in apiData['RiparianStructure']['values']
    ]

    # calculate metrics
    return _calc(riparianVals)
Example #18
def loadChannelUnitsFromXML(xmlFilePath):

    if not path.isfile(xmlFilePath):
        raise MissingException("Missing channel unit file at {0}".format(xmlFilePath))

    tree = ET.parse(xmlFilePath)
    nodRoot = tree.getroot()

    dUnits = {}
    for nodUnit in nodRoot.findall('Units/Unit'):
        nCUNumber = int(nodUnit.find('ChannelUnitNumber').text)
        tier1 = nodUnit.find('Tier1').text
        tier2 = nodUnit.find('Tier2').text
        segment = nodUnit.find('Segment').text

        dUnits[nCUNumber] = (tier1, tier2, segment)

    log = Logger("Channel Units")
    log.info("{0} channel units loaded from XML file".format(len(dUnits)))

    return dUnits
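
The parser expects a document shaped like the following hand-written fragment (not a real CHaMP export); the same element paths work on an in-memory string:

import xml.etree.ElementTree as ET

sample = """<Root>
  <Units>
    <Unit>
      <ChannelUnitNumber>1</ChannelUnitNumber>
      <Tier1>Slow/Pool</Tier1>
      <Tier2>Scour Pool</Tier2>
      <Segment>1</Segment>
    </Unit>
  </Units>
</Root>"""
nodRoot = ET.fromstring(sample)
print(nodRoot.find('Units/Unit/Tier1').text)  # Slow/Pool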
Example #19
def downloadExtractParseVisits(visits, outputFolder):
    log = Logger('Downloading')
    log.info("Downloading all visits from the API")

    projects = []
    for visit in visits:

        try:
            extractpath = os.path.join(outputFolder, 'VISIT_{}'.format(visit))
            projpath = os.path.join(extractpath, 'project.rs.xml')
            downloadUnzipTopo(visit, extractpath)

            proj = TopoProject(extractpath)

            if proj.isrsproject:
                projects.append({"project": proj, "visit": visit})
            else:
                log.error("File not found: {}".format(projpath))
                raise DataException("Missing Project File")

        # Just move on if something fails, but leave a trace in the log
        except Exception as e:
            log.warning("Visit {} failed: {}".format(visit, e))

    # If we didn't get anything back then it's time to freak out a little
    if len(projects) == 0:
        raise DataException("No TopoData.zip files found for any visit")

    return projects
Example #20
def loadChannelUnitsFromJSON(jsonFilePath):
    if jsonFilePath is not None and not path.isfile(jsonFilePath):
        raise MissingException("Missing channel unit file at {0}".format(jsonFilePath))

    dUnits = {}

    with open(jsonFilePath) as data_file:
        data = json.load(data_file)

        for nodUnit in data['values']:
            value = nodUnit['value']
            nCUNumber = int(value['ChannelUnitNumber'])
            tier1 = value['Tier1']
            tier2 = value['Tier2']
            segment = value['ChannelSegmentID']

            dUnits[nCUNumber] = (tier1, tier2, segment)

    log = Logger("Channel Units")
    log.info("{0} channel units loaded from XML file".format(len(dUnits)))

    return dUnits
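
The JSON file is assumed to mirror the API payload consumed by loadChannelUnitsFromAPI above; a fabricated round trip:

import json
import tempfile

payload = {"values": [{"value": {"ChannelUnitNumber": 1, "Tier1": "Slow/Pool",
                                 "Tier2": "Scour Pool", "ChannelSegmentID": 1}}]}
with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(payload, f)
print(loadChannelUnitsFromJSON(f.name))  # {1: ('Slow/Pool', 'Scour Pool', 1)}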
Example #21
    def buildManualFile(self, layerFileName, bMandatory):
        """
        Building a file path using manual layer file naming
        :param layerName:
        :param bMandatory:
        :return:
        """
        path = ""
        log = Logger("buildManualFile")
        try:
            match = next(file for file in os.listdir(self.directory)
                         if file.lower() == layerFileName.lower())
            path = os.path.join(self.directory, match)

        except Exception:
            log.warning("The file called '{0}' does not exist in directory: {1}".format(
                layerFileName, self.directory))
            # if bMandatory:
            #     log.error("The file called '{0}' does not exist in directory: {1}".format(layerFileName, self.directory))
            #     raise DataException("The file called '{0}' does not exist")
        return path
Example #22
def downloadUnzipHydroResults(visitID, unzipPath):
    """
    Download a topo zip file to a local path using a visitID
    :param visitID: visit ID
    :param unzipPath: Location to save model results
    :returns: tuple (files, projpath)
        WHERE
        dict files is api dictionary of hydro model results
        list projpath is list of rs.xml project path for each model result.
    """
    tokenator = Tokenator()
    log = Logger("downloadHydroModelResults")

    # First find the appropriate download URL
    try:
        hydroFieldFolders = APIGet(
            'visits/{}/fieldFolders/HydroModel'.format(visitID))

        log.debug("Getting visit file data")
        files = hydroFieldFolders['files']
    except Exception as e:
        raise MissingException(
            "ERROR: No hydro results found for visit: {}".format(visitID))

    projpath = []
    for file in files:
        downloadUrl = file['downloadUrl']
        # Download the file to a temporary location
        with NamedTemporaryFile() as f:
            response = APIGet(downloadUrl, absolute=True)
            f.write(response.content)

            log.debug("Downloaded file: {} to: {}".format(downloadUrl, f.name))

            # Now we have it. Unzip
            with zipfile.ZipFile(f, 'r') as zip_ref:
                unzipPathModel = os.path.join(unzipPath, os.path.splitext(os.path.basename(f.name))[0])  # rstrip(".zip") strips characters, not the suffix
                log.debug("Unzipping file: {} to: {}".format(
                    f.name, unzipPathModel))
                zip_ref.extractall(unzipPathModel)

            # Return a folder where we can find a project.rs.xml (or not)
            for root, subFolders, walkfiles in os.walk(unzipPath):  # don't shadow `files`, which is returned below
                if "project.rs.xml" in walkfiles:
                    projpath.append(root)

    return files, projpath
Example #23
def process_duplicate_folder(with_spaces, no_spaces):

    log = Logger('Duplicate')

    movers = []
    for root, dirs, files in os.walk(with_spaces):
        for name in files:
            old_path = os.path.join(root, name)
            new_path = old_path.replace(with_spaces, no_spaces)

            # Simply delete the file if it is zero bytes
            if os.stat(old_path).st_size == 0:
                log.info('Deleting zero byte file {}'.format(old_path))
                os.remove(old_path)
                continue

            if not os.path.isdir(os.path.dirname(new_path)):
                os.makedirs(os.path.dirname(new_path))

            if os.path.isfile(new_path):
                os.remove(old_path)
            else:
                log.info('Moving file {}'.format(old_path))
                os.rename(old_path, new_path)
Example #24
def download_file(file_dict, folder):

    log = Logger('Download')

    if not file_dict['name']:
        log.warning('Missing file name in folder {}'.format(folder))
        return

    if not file_dict['downloadUrl'] or file_dict['downloadUrl'].lower() == '?download':
        log.warning('Missing download URL in folder {}'.format(folder))
        return

    file_path = os.path.join(folder, file_dict['name'])

    if not os.path.isdir(folder):
        os.makedirs(folder)

    # Write file info as JSON
    with open(os.path.splitext(file_path)[0] + '.json', 'w') as json_file:
        json.dump(file_dict, json_file)

    # Skip files that exist unless they are zero bytes in which case remove them
    if os.path.isfile(file_path):
        if os.stat(file_path).st_size == 0:
            log.warning('Removing zero byte file {}'.format(file_path))
            os.remove(file_path)
        else:
            return

    # Download missing file
    with open(file_path, 'w+b') as f:
        response = APIGet(file_dict['downloadUrl'], absolute=True)
        f.write(response.content)

    log.info('Downloaded missing file {}'.format(file_path))
Example #25
def downloadUnzipTopo(visitID, unzipPath):
    """
    Download a topo zip file to a local path using a visitID
    :param visitID:
    :param zipFilePath:
    :return:
    """
    tokenator = Tokenator()
    log = Logger("downloadTopoZip")

    # First find the appropriate download URL
    try:
        topoFieldFolders = APIGet(
            'visits/{}/fieldFolders/Topo'.format(visitID))

        log.debug("Getting visit file data")
        file = next(file for file in topoFieldFolders['files']
                    if file['componentTypeID'] == 181)
        downloadUrl = file['downloadUrl']
    except Exception as e:
        raise MissingException(
            "ERROR: No TopoData.zip file found for visit: {}".format(visitID))

    # Download the file to a temporary location
    with NamedTemporaryFile() as f:
        response = APIGet(downloadUrl, absolute=True)
        f.write(response.content)

        log.debug("Downloaded file: {} to: {}".format(downloadUrl, f.name))

        # Now we have it. Unzip
        with zipfile.ZipFile(f, 'r') as zip_ref:
            log.debug("Unzipping file: {} to: {}".format(f.name, unzipPath))
            zip_ref.extractall(unzipPath)

        # Return a folder where we can find a project.rs.xml (or not)
        projpath = None
        for root, subFolders, files in os.walk(unzipPath):
            if "project.rs.xml" in files:
                projpath = root

    return file, projpath
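
Callers unpack the returned tuple to locate the riverscapes project, along the lines of (visit ID and paths hypothetical):

fileJSON, projectFolder = downloadUnzipTopo(1234, '/tmp/topo_inputs')
if projectFolder is None:
    raise MissingException('TopoData.zip contained no project.rs.xml')
projectxml = os.path.join(projectFolder, 'project.rs.xml')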
Example #26
def champ_topo_checker(workbench, folder):

    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(
        folder,
        datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_folder_check.log'))

    # # Loop over site names organized by field season and watershed
    # dbCon = sqlite3.connect(workbench)
    # dbCurs = dbCon.cursor()
    # dbCurs.execute('SELECT WatershedName, VisitYear, SiteName' +
    #     ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)' +
    #     ' GROUP BY WatershedName, VisitYear, SiteName' +
    #     ' ORDER BY VisitYear, WatershedName, SiteName')
    #
    # for row in dbCurs.fetchall():
    #
    #     watershed = row[0]
    #     visit_year = row[1]
    #     site = row[2]
    #     # visitID = row[3]
    #
    #     visit_path1 = os.path.join(folder, str(visit_year), watershed.replace(' ', ''), site)
    #     visit_path2 = visit_path1.replace(' ', '')
    #     if ' ' in site and os.path.isdir(visit_path1) and os.path.isdir(visit_path2):
    #         try:
    #             process_duplicate_folder(visit_path1, visit_path2)
    #         except Exception as e:
    #             log.error('Error processing {}'.format(visit_path1))

    # Create a List
    listOfEmptyDirs = list()
    # Iterate over the directory tree and check if directory is empty.
    for (dirpath, dirnames, filenames) in os.walk(folder):
        if len(dirnames) == 0 and len(filenames) == 0:
            listOfEmptyDirs.append(dirpath)

    log.info('{} empty folders to remove'.format(len(listOfEmptyDirs)))
    for empty in listOfEmptyDirs:
        os.rmdir(empty)

    log.info('Process Complete')
Example #27
def exportAsCSV(feats, outCSVfile):
    log = Logger("CSVExport")
    log.info("Beginning CSV Export")
    with open(outCSVfile, "wb") as csvfile:
        csvWriter = csv.writer(csvfile)
        #fieldsGIS = ("POINT_NUMBER", "SHAPE@Y", "SHAPE@X", "SHAPE@Z", "DESCRIPTION")
        csvWriter.writerow(("PNTNO", "Y", "X", "ELEV", "DESC"))
        for feat in feats:
            # Do some checking on mandatory fields first
            pnfield = getfield(feat, [
                "POINT_NUMB", "Point_Numb", "numb", "Number", "Point",
                "points", "p", "Point_Id", "PointId", "POINT_ID", "POINTID",
                "PointNumbe", "Point_id", "Name", "FID", "OBJECTID"
            ])
            cfield = getfield(feat, ["Code", "CODE"])
            row = (feat['fields'][pnfield], feat['geometry'].y,
                   feat['geometry'].x, feat['geometry'].z,
                   feat['fields'][cfield])
            csvWriter.writerow(row)
    log.info("CSV Export complete")
    return outCSVfile
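
The feats argument is assumed to be a list of dicts carrying a 'fields' mapping and a 3D shapely 'geometry', with getfield (defined elsewhere) picking the first field name that exists. A fabricated example:

from shapely.geometry import Point

feats = [{'fields': {'POINT_NUMB': 1, 'CODE': 'tp'},
          'geometry': Point(421003.2, 4915020.7, 1200.5)}]
exportAsCSV(feats, 'out_points.csv')  # writes PNTNO,Y,X,ELEV,DESC rows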
Example #28
def visitTopoAuxMetrics(visitID, metricXMLPath):

    log = Logger('Metrics')
    log.info("Topo aux metrics for visit {0}".format(visitID))

    # Make all the API calls and return a dictionary of API call name keyed to data
    apiData = downloadAPIData(visitID)

    # Dictionary to hold the metric values
    visitMetrics = {}

    metric_uc = UndercutMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Undercut',
                                              metric_uc.metrics)

    metrics_su = SubstrateMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'Substrate',
                                              metrics_su.metrics)

    metrics_si = SidechannelMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'SideChannel',
                                              metrics_si.metrics)

    metrics_fi = FishcoverMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'FishCover',
                                              metrics_fi.metrics)

    metrics_wo = LargeWoodMetrics(apiData)
    integrateMetricDictionaryWithTopLevelType(visitMetrics, 'LargeWood',
                                              metrics_wo.metrics)

    # Metric calculation complete. Write the topometrics to the XML file
    writeMetricsToXML(visitMetrics, visitID, '', metricXMLPath,
                      'TopoAuxMetrics', __version__)

    log.info("Metric calculation complete for visit {0}".format(visitID))
    return visitMetrics
Example #29
def topomover(jsonfile):
    log = Logger("TopoMover")
    visitsraw = APIGet('visits')
    visitsreorg = {v['id']: v for v in visitsraw}
    visitids = [v['id'] for v in visitsraw]
    visitids.sort()

    # Load the inventory
    inventory = {}
    if os.path.isfile(jsonfile):
        try:
            with open(jsonfile, "r") as f:
                inventory = json.load(f)
        except Exception as e:
            log.warning("Could not parse inventory file {}: {}".format(jsonfile, e))

    counter = 0
    for vid in visitids:
        strvid = str(vid)
        APIVisit = visitsreorg[vid]

        if strvid not in inventory:
            inventory[strvid] = {}

        # Decide if there's anything to do:
        if APIDATEFIELD not in inventory[strvid] \
                or APIstrtodate(APIVisit[APIDATEFIELD]) > APIstrtodate(inventory[strvid][APIDATEFIELD]):
            processZipFile(inventory[strvid], APIVisit)
        else:
            log.info("Nothing to do")
        counter += 1
        log.info("STATUS: {:d}%  {:d}/{:d}".format(
            (100 * counter / len(visitids)), counter, len(visitids)))

        with open(jsonfile, "w+") as f:
            json.dump(inventory, f, indent=4, sort_keys=True)
Example #30
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID',
                        help='the id of the visit to use (no spaces)',
                        type=str)
    parser.add_argument('outputfolder', help='Output folder')
    parser.add_argument('--datafolder',
                        help='(optional) Top level folder containing TopoMetrics Riverscapes projects',
                        type=str)
    parser.add_argument('--logfile',
                        help='output log file.',
                        default="",
                        type=str)
    parser.add_argument('--verbose',
                        help='Get more information in your logs.',
                        action='store_true',
                        default=False)

    args = parser.parse_args()

    # Make sure the output folders exist before the log file is written into them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    if args.logfile == "":
        logfile = os.path.join(resultsFolder, "cad_export.log")
    else:
        logfile = args.logfile

    logg = Logger("CADExport")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:

        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(
                args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        projectxml = os.path.join(projectFolder, "project.rs.xml")
        finalResult = export_cad_files(projectxml, resultsFolder)

    except (MissingException, NetworkException, DataException) as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    sys.exit(0)