Example #1
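Both this example and the nearly identical Example #2 below appear to be excerpted from a script's main entry point and rely on names defined elsewhere. A minimal sketch of the imports they assume (the standard-library and third-party imports are the usual ones for these calls; the project-specific loader imports are guesses and left commented):

    import os
    import logging
    from datetime import datetime

    from slacker import Slacker
    from thredds_crawler.crawl import Crawl

    # Project-specific names; the exact module paths depend on the STOQS
    # layout and are assumptions here:
    # from CANON import CANONLoader
    # from LakeMI import LakeMILoader
    # import lrauvNc4ToNetcdf

    logger = logging.getLogger(__name__)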
    # Parse the optional end time, e.g. '20240131T235959'
    if args.end is not None:
        end = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
    else:
        end = None

    cl = CANONLoader(args.database, args.campaign)
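    # Note: cl is constructed here but not used in this excerpt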

    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)
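        # SLACKTOKEN must be set in the environment (KeyError if missing);
        # the client could later post status with, e.g.,
        # slack.chat.post_message('#channel', 'Load complete')
        # (channel and message text are hypothetical)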

    # Assume the database has already been created with description and
    # terrain information, so use minimal arguments in the constructor
    lm = LakeMILoader(args.database, args.campaign)
    lm.dbAlias = args.database
    lm.campaignName = args.campaign

    # Split inUrl into the base catalog URL and a file-match pattern
    s = args.inUrl.rsplit('/', 1)
    files = s[1]
    url = s[0]
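    # e.g. 'http://host/thredds/dir/.*\.nc4'.rsplit('/', 1) yields
    # ['http://host/thredds/dir', '.*\.nc4'] (URL and pattern illustrative)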
    logger.info("Crawling %s for %s files", url, files)
    c = Crawl(os.path.join(url, 'catalog.xml'), select=[files], debug=False)

    for d in c.datasets:
        logger.debug('Found %s', d.id)

    urls = [
        s2.get("url") for d in c.datasets for s2 in d.services
        if s2.get("service").lower() == "opendap"
    ]
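This comprehension flattens each dataset's service list and keeps only the OPeNDAP endpoints. A self-contained illustration with stand-in data (the dicts below only mimic the shape thredds_crawler reports; the URLs are made up):

    from types import SimpleNamespace

    datasets = [SimpleNamespace(services=[
        {'service': 'OPeNDAP', 'url': 'http://example.com/dods/a.nc4'},
        {'service': 'HTTPServer', 'url': 'http://example.com/files/a.nc4'},
    ])]
    urls = [
        s2.get('url') for d in datasets for s2 in d.services
        if s2.get('service').lower() == 'opendap'
    ]
    assert urls == ['http://example.com/dods/a.nc4']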
Example #2
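Aside from its final line, this example repeats the body of Example #1; the same assumed imports apply.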
    # Parse the optional end time, e.g. '20240131T235959'
    if args.end is not None:
        end = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
    else:
        end = None

    cl = CANONLoader(args.database, args.campaign)

    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)

    # Assume the database has already been created with description and
    # terrain information, so use minimal arguments in the constructor
    lm = LakeMILoader(args.database, args.campaign)
    lm.dbAlias = args.database
    lm.campaignName = args.campaign
   
    # Split inUrl into the base catalog URL and a file-match pattern
    s = args.inUrl.rsplit('/', 1)
    files = s[1]
    url = s[0]
    logger.info("Crawling %s for %s files", url, files)
    c = Crawl(os.path.join(url, 'catalog.xml'), select=[files], debug=False)

    for d in c.datasets:
        logger.debug('Found %s', d.id)
    
    urls = [
        s2.get("url") for d in c.datasets for s2 in d.services
        if s2.get("service").lower() == "opendap"
    ]

    pw = lrauvNc4ToNetcdf.InterpolatorWriter()
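The InterpolatorWriter is defined in the project's lrauvNc4ToNetcdf module; its methods are not shown in this excerpt. From here the collected OPeNDAP URLs would typically be iterated, along these lines (illustrative only):

    for u in urls:
        logger.info('Will process %s up to %s', u, end)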