def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('workbench',
                        help='Workbench database path',
                        type=argparse.FileType('r'))
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    args = parser.parse_args()

    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)

    # Initiate the log file
    logg = Logger("Measurement Downloader")
    logfile = os.path.join(args.outputfolder, "measurement_downloader.log")
    logg.setup(logPath=logfile, verbose=False)

    try:
        metric_downloader(args.workbench.name, args.outputfolder)
    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
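The `sys.exit(e.returncode)` pattern above assumes the project's custom exception classes carry their own exit code. A minimal sketch of what such a class might look like (hypothetical; the real DataException/MissingException/NetworkException definitions may differ):

# Hypothetical sketch only: an exception that carries its exit code and
# prints its own message, matching how the handlers above use it.
class DataException(Exception):
    returncode = 2  # data problems exit with 2, mirroring validate() later in this page

    def __init__(self, message):
        super().__init__(message)
        print(message)  # "Exception class prints the relevant information"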
Example #2
def dryWidth(xs, rivershapeWithDonuts):
    """

    :param xs: shapely cross section object
    :param rivershapeWithDonuts: Polygon with non-qualifying donuts retained
    :return:
    """
    # Get all intersections of this cross section with the rivershape
    log = Logger("dryWidth")
    try:
        # KMW: buffer(0) clears up invalid geometries
        intersects = xs.intersection(rivershapeWithDonuts.buffer(0))
    except TopologicalError as e:
        log.error(e)
        raise DataException(
            "Could not perform intersection on `rivershapeWithDonuts`. Look for small, invalid islands as a possible cause."
        )

    # The intersect may be one object (LineString) or many. We have to handle both cases
    if intersects.type == "LineString":
        intersects = MultiLineString([intersects])
    elif intersects.type == "Point":
        return 0

    return sum([intersect.length for intersect in intersects])
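A quick usage sketch for dryWidth with synthetic shapely geometries (values are illustrative only):

# Illustrative only: a 14 m cross section crossing a 10 m x 2 m rectangular "river"
from shapely.geometry import LineString, Polygon

xs = LineString([(-2, 1), (12, 1)])
river = Polygon([(0, 0), (10, 0), (10, 2), (0, 2)])

print(dryWidth(xs, river))  # 10.0 -- the length of the cross section inside the rivershape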
Example #3
def downloadExtractParseVisits(visits, outputFolder):
    log = Logger('Downloading')
    log.info("Downloading all visits from the API")

    projects = []
    for visit in visits:

        try:
            extractpath = os.path.join(outputFolder, 'VISIT_{}'.format(visit))
            projpath = os.path.join(extractpath, 'project.rs.xml')
            downloadUnzipTopo(visit, extractpath)

            proj = TopoProject(extractpath)

            if proj.isrsproject:
                projects.append({"project": proj, "visit": visit})
            else:
                log.error("File not found: {}".format(projpath))
                raise DataException("Missing Project File")

        # Log the failure, then move on to the next visit
        except Exception as e:
            log.error('Error for visit {}: {}'.format(visit, e))

    # If we didn't get anything back then it's time to freak out a little
    if len(projects) == 0:
        raise DataException("No TopoData.zip files found for any visit")

    return projects
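A brief usage sketch (the visit IDs and output path are placeholders):

# Placeholder visit IDs; each successful download contributes a TopoProject entry
projects = downloadExtractParseVisits([1234, 5678], '/tmp/champ_output')
for entry in projects:
    print(entry['visit'], entry['project'])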
Example #4
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--hydroprojectxml', '-p', help='(optional) hydro project xml file', type=str)
    parser.add_argument('--topoprojectxml', '-t', help='(optional) topo project xml file', type=str)
    parser.add_argument('--datafolder', help='(optional) Top level folder containing Hydro Model Riverscapes projects', type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False )
    args = parser.parse_args()

    # Make sure the output and results folders exist before the log file is created inside them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    log = Logger("Program")
    logfile = os.path.join(resultsFolder, "hydro_gis.log")
    xmlfile = os.path.join(resultsFolder, "hydro_gis.xml")
    log.setup(logPath=logfile, verbose=args.verbose)

    try:

        # If we need to go get our own topodata.zip file and unzip it we do this
        # if args.datafolder is None:
        #     hydroDataFolder = os.path.join(args.outputfolder, "inputs")
        #     folderJSON, list_projectFolders = downloadUnzipTopo(args.visitID, hydroDataFolder)
        # # otherwise just pass in a path to existing data
        # else:
        #     list_projectFolders = args.datafolder
        # runResult = []
        # for fileJSON, projectFolder in list_projectFolders:
        result = hydro_gis_export(args.hydroprojectxml, args.topoprojectxml, resultsFolder)

        sys.exit(result)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
Example #5
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--channelunitsjson', help='(optional) json file to load channel units from', type=str)
    parser.add_argument('--workbenchdb', help='(optional) sqlite db to load channel units from', type=str)
    parser.add_argument('--datafolder', help='(optional) Top level folder containing TopoMetrics Riverscapes projects', type=str)
    parser.add_argument('--verbose', help='Get more information in your logs.', action='store_true', default=False)
    args = parser.parse_args()

    # Make sure the output and results folders exist before the log file is created inside them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    logg = Logger("Program")
    logfile = os.path.join(resultsFolder, "topo_metrics.log")
    xmlfile = os.path.join(resultsFolder, "topo_metrics.xml")
    logg.setup(logPath=logfile, verbose=args.verbose)

    try:

        projectFolder = ""
        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        dMetricsObj = visitTopoMetrics(args.visitID, xmlfile, projectFolder, args.channelunitsjson, args.workbenchdb, None)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
Example #6
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--datafolder',
                        help='(optional) Top level folder containing TopoMetrics Riverscapes projects',
                        type=str)
    parser.add_argument('--verbose',
                        help='Get more information in your logs.',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    # Make sure the results folder exists before the log file is created inside it
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    log = Logger("Program")
    logfile = os.path.join(resultsFolder, "aux_metrics.log")
    xmlfile = os.path.join(resultsFolder, "aux_metrics.xml")
    log.setup(logPath=logfile, verbose=args.verbose)

    try:

        runAuxMetrics(xmlfile, resultsFolder, args.visitID)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
Example #7
def validate(topoPath, xmlfile, visitID):
    """
    Validate CHaMP topo data in a flat folder structure
    :param topoPath: Full path to the topo data (i.e. GISLayers)
    :param xmlfile: Path to the XML file where validation results are written
    :param visitID: ID of the visit being validated
    :return: 0 for success
             1 for all code failures or unhandled problems
             2 for data issues
    """

    returnValue = 0
    log = Logger("Validation")

    survey = CHaMPSurvey()
    survey.load_topo_project(topoPath, visitID)
    validationResults = survey.validate()

    stats = {
        "errors": 0,
        "warnings": 0,
        "nottested": 0,
        "status": Status.PASS,
        "layers": {}
    }
    for datasetName, datasetResults in validationResults.items():
        layerstatus = Status.PASS
        for result in datasetResults:
            log.info("[{0:{4}<{5}}] [{1}] [{2}] {3}".format(
                result["Status"], datasetName, result["TestName"],
                result["Message"], " ", 10))
            if result[
                    "Status"] == "Error":  #or result["Status"] == "NotTested":
                stats["errors"] += 1
                stats["status"] = Status.FAIL
                layerstatus = Status.FAIL
                returnValue = 2
            elif result["Status"] == "NotTested":
                stats["warnings"] += 1
            elif result["Status"] == "Warning":
                stats["nottested"] += 1

        stats['layers'][datasetName] = layerstatus

    if len(validationResults) == 0:
        log.error("No layers found to validate")
        stats["errors"] += 1
        stats["status"] = Status.FAIL
        returnValue = 2

    # The last log message is what gets surfaced downstream, so report the final
    # status either way (error level is used deliberately so it gets picked up):
    if returnValue == 2:
        log.error("Validation Failed")
    else:
        log.error("Validation Passed")

    writeMetricsToXML(validationResults, stats, xmlfile)

    return returnValue
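Callers can hand the return value straight to sys.exit so shell scripts can distinguish data issues (2) from code failures (1); a sketch with placeholder paths:

import sys

# 0 = pass, 2 = data issues; unhandled exceptions surface as 1 elsewhere
sys.exit(validate('/path/to/GISLayers', '/path/to/validation.xml', 1234))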
Example #8
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID',
                        help='the visit id of the site to use (no spaces)',
                        type=str)
    parser.add_argument('outputfolder', help='Output folder', type=str)
    parser.add_argument('substrate_values',
                        nargs='+',
                        help="one or more percentiles of grain size to calculate. 50 for D50, 84 for D84, etc",
                        type=int)
    parser.add_argument('--out_channel_roughness_value',
                        help="i.e. 4000.0",
                        type=float,
                        default=4000.0)
    parser.add_argument('--ocular_estimates',
                        help="(optional) local json file of ocular estimates")
    parser.add_argument('--datafolder',
                        help='(optional) local folder containing TopoMetrics Riverscapes projects',
                        type=str)
    parser.add_argument('--env', '-e',
                        help='(optional) local env file',
                        type=str)
    parser.add_argument('--verbose',
                        help='Get more information in your logs.',
                        action='store_true',
                        default=False)

    args = parser.parse_args()

    if not all([args.visitID, args.outputfolder, args.substrate_values, args.out_channel_roughness_value]):
        print("ERROR: Missing arguments")
        parser.print_help()
        sys.exit(1)

    if args.env:
        setEnvFromFile(args.env)

    # Make sure the output and results folders exist before the log file is created inside them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    logg = Logger("Program")
    logfile = os.path.join(resultsFolder, "substrate_raster.log")
    logg.setup(logPath=logfile, verbose=args.verbose)

    # Fiona debug-level loggers can flood the log, so quiet the geospatial libraries
    logging.getLogger("Fiona").setLevel(logging.ERROR)
    logging.getLogger("fiona").setLevel(logging.ERROR)
    logging.getLogger("fiona.collection").setLevel(logging.ERROR)
    logging.getLogger("shapely.geos").setLevel(logging.ERROR)
    logging.getLogger("rasterio").setLevel(logging.ERROR)

    try:

        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            if not os.path.isdir(topoDataFolder):
                os.makedirs(topoDataFolder)
            fileJSON, projectFolder = downloadUnzipTopo(
                args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        # Load ocular estimates from the API unless a local json file was supplied
        if args.ocular_estimates is None:
            dict_ocular = APIGet("visits/{}/measurements/Substrate%20Cover".format(str(args.visitID)))
        else:
            dict_ocular = json.load(open(args.ocular_estimates, 'rt'))

        # Stamp each ocular estimate with its ChannelUnitNumber, keyed by ChannelUnitID
        dict_units = APIGet("visits/{}/measurements/Channel%20Unit".format(str(args.visitID)))
        dict_unitkey = {x['value']['ChannelUnitID']: x['value']['ChannelUnitNumber']
                        for x in dict_units['values']}
        for i in range(len(dict_ocular['values'])):
            dict_ocular['values'][i]['value']['ChannelUnitNumber'] = \
                dict_unitkey[dict_ocular['values'][i]['value']['ChannelUnitID']]

        generate_substrate_raster(projectFolder, resultsFolder,
                                  args.substrate_values, dict_ocular,
                                  args.out_channel_roughness_value)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        logg.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)

    sys.exit(0)
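The ocular-estimate handling above joins two API payloads on ChannelUnitID; a toy illustration of that join with made-up records:

# Made-up payload shapes, for illustration only
dict_units = {'values': [{'value': {'ChannelUnitID': 101, 'ChannelUnitNumber': 1}}]}
dict_ocular = {'values': [{'value': {'ChannelUnitID': 101, 'SubstrateCover': 'Gravel'}}]}

# Build the ChannelUnitID -> ChannelUnitNumber lookup, then stamp each estimate
unitkey = {u['value']['ChannelUnitID']: u['value']['ChannelUnitNumber']
           for u in dict_units['values']}
for rec in dict_ocular['values']:
    rec['value']['ChannelUnitNumber'] = unitkey[rec['value']['ChannelUnitID']]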
Example #9
def main():
    # parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument('visitID', help='Visit ID', type=int)
    parser.add_argument('outputfolder', help='Path to output folder', type=str)
    parser.add_argument('--datafolder',
                        help='(optional) Top level folder containing TopoMetrics Riverscapes projects',
                        type=str)
    parser.add_argument('--verbose',
                        help='Get more information in your logs.',
                        action='store_true',
                        default=False)
    args = parser.parse_args()

    # Make sure the output and results folders exist before the log file is created inside them
    resultsFolder = os.path.join(args.outputfolder, "outputs")
    if not os.path.isdir(args.outputfolder):
        os.makedirs(args.outputfolder)
    if not os.path.isdir(resultsFolder):
        os.makedirs(resultsFolder)

    # Initiate the log file
    log = Logger("Program")
    logfile = os.path.join(resultsFolder, "bankfull_metrics.log")
    xmlfile = os.path.join(resultsFolder, "bankfull_metrics.xml")
    log.setup(logPath=logfile, verbose=args.verbose)

    try:

        # If we need to go get our own topodata.zip file and unzip it we do this
        if args.datafolder is None:
            topoDataFolder = os.path.join(args.outputfolder, "inputs")
            fileJSON, projectFolder = downloadUnzipTopo(
                args.visitID, topoDataFolder)
        # otherwise just pass in a path to existing data
        else:
            projectFolder = args.datafolder

        from champmetrics.lib.topoproject import TopoProject
        topo_project = TopoProject(os.path.join(projectFolder, "project.rs.xml"))

        # The visit ID lives in the project metadata under either 'Visit' or 'VisitID'
        tree = ET.parse(os.path.join(projectFolder, "project.rs.xml"))
        root = tree.getroot()
        visitid = root.findtext("./MetaData/Meta[@name='Visit']")
        if visitid is None:
            visitid = root.findtext("./MetaData/Meta[@name='VisitID']")

        finalResult = BankfullMetrics(topo_project.getpath("DEM"),
                                      topo_project.getpath("DetrendedDEM"),
                                      topo_project.getpath("Topo_Points"))

        write_bfmetrics_xml(finalResult, visitid, xmlfile)
        sys.exit(0)

    except (DataException, MissingException, NetworkException) as e:
        # Exception class prints the relevant information
        traceback.print_exc(file=sys.stdout)
        sys.exit(e.returncode)
    except AssertionError as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
    except Exception as e:
        log.error(e)
        traceback.print_exc(file=sys.stdout)
        sys.exit(1)
Example #10
def BankfullMetrics(dem, detrended_dem, shp_points):
    """
    :param dem: path to the DEM raster
    :param detrended_dem: path to the detrended DEM raster
    :param shp_points: path to the Topo_Points shapefile containing crew bankfull ('bf') points
    :return: dict of bankfull Volume and Depth (Max, Mean) metrics
    """

    log = Logger("Bankfull Metrics")

    # 1. Find the average elevation of crew bankfull points in the detrended DEM.
    gdf_topo_points = geopandas.read_file(shp_points)

    # The code field may be capitalized differently depending on the source data
    if 'Code' in gdf_topo_points:
        gdf_bf_points = gdf_topo_points[gdf_topo_points["Code"] == 'bf']
    else:
        gdf_bf_points = gdf_topo_points[gdf_topo_points["code"] == 'bf']

    log.info("Loaded BF points")

    with rasterio.open(detrended_dem) as rio_detrended:
        # Sample the detrended DEM at each bf point, filtering out points
        # that fall outside the DetrendedDEM data extent.
        bf_elevations = [
            v[0] for v in rio_detrended.sample((p.x, p.y) for p in gdf_bf_points.geometry)
            if v[0] != rio_detrended.nodata
        ]
        detrended_band = rio_detrended.read(1)

    if len(bf_elevations) == 0:
        # Without any valid bf points the average below would divide by zero
        log.error("No valid bf elevation points found.")
        raise DataException("No valid bf elevation points found.")
    else:
        log.info("Sampled {} valid BF point elevations from the DetrendedDEM".format(len(bf_elevations)))

    with rasterio.open(dem) as rio_dem:
        dem_band = rio_dem.read(1)

    # Enforce orthogonal rasters: pad each band so DEM and DetrendedDEM share a
    # common extent. Pad sizes are in cells, assuming the 0.1 m cell size used throughout.
    dem_pad_top = int((rio_detrended.bounds.top - rio_dem.bounds.top) / 0.1) if rio_detrended.bounds.top > rio_dem.bounds.top else 0
    dem_pad_bottom = int((rio_dem.bounds.bottom - rio_detrended.bounds.bottom) / 0.1) if rio_dem.bounds.bottom > rio_detrended.bounds.bottom else 0
    dem_pad_right = int((rio_detrended.bounds.right - rio_dem.bounds.right) / 0.1) if rio_detrended.bounds.right > rio_dem.bounds.right else 0
    dem_pad_left = int((rio_dem.bounds.left - rio_detrended.bounds.left) / 0.1) if rio_dem.bounds.left > rio_detrended.bounds.left else 0

    det_pad_top = int((rio_dem.bounds.top - rio_detrended.bounds.top) / 0.1) if rio_detrended.bounds.top < rio_dem.bounds.top else 0
    det_pad_bottom = int((rio_detrended.bounds.bottom - rio_dem.bounds.bottom) / 0.1) if rio_dem.bounds.bottom < rio_detrended.bounds.bottom else 0
    det_pad_right = int((rio_dem.bounds.right - rio_detrended.bounds.right) / 0.1) if rio_detrended.bounds.right < rio_dem.bounds.right else 0
    det_pad_left = int((rio_detrended.bounds.left - rio_dem.bounds.left) / 0.1) if rio_dem.bounds.left < rio_detrended.bounds.left else 0

    np_detrended_ortho = np.pad(detrended_band,
                                ((det_pad_top, det_pad_bottom),
                                 (det_pad_left, det_pad_right)),
                                mode="constant",
                                constant_values=np.nan)
    np_dem_ortho = np.pad(dem_band, ((dem_pad_top, dem_pad_bottom),
                                     (dem_pad_left, dem_pad_right)),
                          mode="constant",
                          constant_values=np.nan)

    if all(v == 0 for v in [
            dem_pad_top, dem_pad_bottom, dem_pad_right, dem_pad_left,
            det_pad_top, det_pad_bottom, det_pad_right, det_pad_left
    ]):
        log.info("DEM and DetrendedDEM have concurrent extents")
    else:
        log.warning(
            "Non-Concurrent Rasters encountered. DEM and DetrendedDEM using padded extents"
        )

    ma_detrended = np.ma.MaskedArray(
        np_detrended_ortho, np.equal(np_detrended_ortho, rio_detrended.nodata))
    ma_dem = np.ma.MaskedArray(np_dem_ortho,
                               np.equal(np_dem_ortho, rio_dem.nodata))

    # Generate Trend Grid
    np_trendgrid = np.subtract(ma_dem, ma_detrended)
    log.info("Trend surface created")

    # Average BF elev to constant raster in detrended space
    ave_bf_det_elev = sum(bf_elevations) / float(len(bf_elevations))
    ma_bf_detrended = np.full_like(ma_detrended,
                                   ave_bf_det_elev,
                                   dtype=np.float64)
    log.info("Detrended BF surface created")

    # add trend grid to BF detrended surface
    np_bf_surface = np.add(ma_bf_detrended, np_trendgrid)
    log.info("BF elevation surface created")

    # Generate depth and volume
    np_bf_depth_raw = np.subtract(np_bf_surface, ma_dem)
    np_bf_depth = np.multiply(np.greater(np_bf_depth_raw, 0), np_bf_depth_raw)
    np_bf_volume = np.multiply(np_bf_depth, 0.1 * 0.1)
    log.info("BF Depth surface created")

    # Mask -0.0 values, which were otherwise getting included in the mean calculation
    ma_bf_depth = np.ma.MaskedArray(np_bf_depth, np.equal(np_bf_depth, -0.0))

    # Run ZonalStatisticsAsTable to get the metric values:
    # Sum the bankfull depth raster values and multiply by the area of one cell to produce BFVol.
    # Max the bankfull depth raster values is DepthBF_Max.
    # Average the bankfull depth raster values is DepthBF_Avg
    bf_volume = np.nansum(np_bf_volume)
    bf_depth_max = np.nanmax(ma_bf_depth)
    bf_depth_mean = np.nanmean(ma_bf_depth)
    log.info("BF metrics calculated")

    results = {
        "Volume": bf_volume,
        "Depth": {
            "Max": bf_depth_max,
            "Mean": bf_depth_mean
        }
    }

    return results
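The core arithmetic is easier to see on toy arrays: the trend grid is DEM minus detrended DEM, the bankfull surface is that trend plus the constant detrended bankfull elevation, and depth and volume follow. A self-contained sketch assuming the same 0.1 m cells:

import numpy as np

dem = np.array([[10.0, 10.2], [10.4, 10.6]])   # toy elevations (m)
detrended = np.ones((2, 2))                    # toy detrended surface
ave_bf_det_elev = 1.3                          # mean of sampled bf point elevations

trend = dem - detrended                        # trend surface
bf_surface = trend + ave_bf_det_elev           # bankfull elevation surface
depth = np.maximum(bf_surface - dem, 0.0)      # clamp dry cells to zero depth
volume = np.nansum(depth * 0.1 * 0.1)          # 0.1 m x 0.1 m cell area

print(depth)   # 0.3 everywhere: the bankfull surface sits 0.3 m above the DEM
print(volume)  # 4 cells x 0.3 m x 0.01 m^2 = 0.012 m^3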
Example #11
def APICall(url, absolute=False, method=requests.get):
    """
    APICall is a catch-all function for any Sitka API call
    :param url:
    :param absolute:
    :return:
    """
    if os.environ.get('API_BASE_URL') is None:
        raise Exception("Missing API_BASE_URL")

    tokenator = Tokenator()
    log = Logger("API:Call")

    if not absolute:
        url = "{0}/{1}".format(os.environ.get('API_BASE_URL'), url)

    retry = True
    retries = 0
    response = None

    while retry:
        retries += 1

        headers = {"Authorization": tokenator.TOKEN}

        # Attempt the request; connection-level failures are retried until the limit is hit
        try:
            response = method(url, headers=headers, verify=verification())
        except Exception as e:
            errmsg = "Connection Exception: Request exception caught and will be retried: `{}` Retry: {}/{}".format(
                e, retries, RETRIES_ALLOWED)
            if retries >= RETRIES_ALLOWED:
                raise NetworkException(errmsg)
            log.error(errmsg)
            continue

        # 200 codes mean good
        code = response.status_code
        errmsg = ""
        if code >= 200 and code < 300:
            retry = False
        elif code == 401:
            # This means our token has expired and we should get a new one
            errmsg = "401 Authorization error: Problem with API Call. Getting new token. Retry#: {0}/{1}".format(
                retries, RETRIES_ALLOWED)
            tokenator.reset()
            tokenator.getToken()
        elif code >= 400 and code < 500:
            raise MissingException("{} Error: {}".format(code, url))
        elif code >= 500 and code < 600:
            errmsg = "500 Error: Problem with API Call. Retrying after {0} seconds Retry#: {1}/{2}".format(
                RETRY_DELAY, retries, RETRIES_ALLOWED)
            time.sleep(RETRY_DELAY)
        else:
            errmsg = "UNKNOWN ERROR: Problem with API Call. Retrying after {0} seconds Retry#: {1}/{2}".format(
                RETRY_DELAY, retries, RETRIES_ALLOWED)
            time.sleep(RETRY_DELAY)

        if retry:
            if retries >= RETRIES_ALLOWED:
                raise NetworkException(errmsg)
            else:
                log.error(errmsg)

    # Simple JSON responses just return the parsed json. If this is binary data we return
    # the whole response object and let the caller deal with it.
    if 'json' in response.headers['content-type']:
        return json.loads(response.content)
    else:
        return response
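Usage is a thin wrapper around requests; a sketch where the base URL and endpoints are placeholders:

import os
import requests

os.environ['API_BASE_URL'] = 'https://api.example.com'  # placeholder

visit = APICall('visits/1234')                                 # GET; JSON comes back parsed
resp = APICall('https://example.com/topo.zip', absolute=True)  # binary: full Response returned
posted = APICall('some/endpoint', method=requests.post)        # other HTTP verbs via `method`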
Example #12
def champ_topo_checker(workbench, folder):
    log = Logger('CHaMP Files')
    log.setup(logPath=os.path.join(
        folder,
        datetime.now().strftime("%Y%m%d-%H%M%S") + '_champ_files.log'))

    dbCon = sqlite3.connect(workbench)
    dbCurs = dbCon.cursor()
    dbCurs.execute(
        'SELECT WatershedName, VisitYear, SiteName, VisitID'
        ' FROM vwVisits WHERE ProgramID = 1 AND ProtocolID IN (2030, 416, 806, 1966, 2020, 1955, 1880, 10036, 9999)'
        ' ORDER BY VisitYear, WatershedName')

    for watershed, visit_year, site, visitID in dbCurs.fetchall():

        visit_path = os.path.join(folder, str(visit_year),
                                  watershed.replace(' ', ''),
                                  site.replace(' ', ''),
                                  'VISIT_{}'.format(visitID))
        log.info('Processing {}'.format(visit_path))

        if not os.path.isdir(visit_path):
            os.makedirs(visit_path)

        try:
            visit_data = APIGet('visits/{}'.format(visitID))

            # Write visit information to json file
            with open(os.path.join(visit_path, 'visit_info.json'),
                      'w') as json_file:
                json.dump(visit_data, json_file)

            # Loop over the two lists of folders per visit: field folders and visit folders
            for api_key, local_folder in {
                    'fieldFolders': 'Field Folders',
                    'folders': 'Visit Folders'
            }.items():

                if api_key in visit_data and isinstance(visit_data[api_key], list):
                    for folder_name in visit_data[api_key]:
                        field_folder_path = os.path.join(visit_path, local_folder, folder_name['name'])
                        field_folder_data = APIGet(folder_name['url'], True)

                        if isinstance(field_folder_data, dict) and 'files' in field_folder_data:
                            for file_dict in field_folder_data['files']:
                                download_file(file_dict, field_folder_path)

            # Get all the miscellaneous files for the visit
            for file_dict in visit_data['files']:
                download_file(file_dict, os.path.join(visit_path, 'Files'))

        except Exception as e:
            log.error('Error for visit {}: {}'.format(visitID, e))

    log.info('Process Complete')
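A usage sketch (both paths are placeholders; the workbench SQLite database must contain the vwVisits view queried above):

champ_topo_checker('/path/to/workbench.db', '/path/to/champ_files')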