Example 1
    # Assumed: parse the optional start time, mirroring the end-time branch below
    if args.start is not None:
        dt = datetime.strptime(args.start, '%Y%m%dT%H%M%S')
        start = dt
    else:
        start = None

    if args.end is not None:
        dt = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
        end = dt
    else:
        end = None

    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)

    # Assume that the database has already been created with description and terrain information, so use minimal arguments in constructor
    cl = CANONLoader(args.database, args.campaign)
    cl.dbAlias = args.database
    cl.campaignName = args.campaign

    # Get directory list from sites
    url, files = args.inUrl.rsplit('/', 1)
    logger.info("Crawling %s for %s files", url, files)
    skips = Crawl.SKIPS + [
        ".*Courier*", ".*Express*", ".*Normal*", ".*Priority*", ".*.cfg$",
        ".*.js$", ".*.kml$", ".*.log$"
    ]
    c = Crawl(os.path.join(url, 'catalog.xml'),
              select=[files],
              debug=False,
              skip=skips)

    for d in c.datasets:
        logger.debug('Found %s', d.id)
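Both snippets pick up after command-line parsing, so the `args` object already carries `database`, `campaign`, `inUrl`, `start`, `end`, and `post`. A minimal sketch of an argparse setup that would supply those attributes follows; the option names and help strings are assumptions, and only the `%Y%m%dT%H%M%S` timestamp format comes from the snippets themselves.

    import argparse

    # Hypothetical parser matching the attributes consumed in the examples above
    parser = argparse.ArgumentParser(description='Crawl a THREDDS catalog and load LRAUV data')
    parser.add_argument('--database', required=True, help='Database alias passed to CANONLoader')
    parser.add_argument('--campaign', required=True, help='Campaign name passed to CANONLoader')
    parser.add_argument('--inUrl', required=True, help='THREDDS URL ending in a file-match pattern')
    parser.add_argument('--start', help='Start time as YYYYMMDDTHHMMSS')
    parser.add_argument('--end', help='End time as YYYYMMDDTHHMMSS')
    parser.add_argument('--post', action='store_true', help='Post a Slack notification')
    args = parser.parse_args()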
Example 2
    # Assumed: parse the optional start time, mirroring the end-time branch below
    if args.start is not None:
        dt = datetime.strptime(args.start, '%Y%m%dT%H%M%S')
        start = dt
    else:
        start = None

    if args.end is not None:
        dt = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
        end = dt
    else:
        end = None

    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)

    # Assume that the database has already been created with description and terrain information, so use minimal arguments in constructor
    cl = CANONLoader(args.database, args.campaign)
    cl.dbAlias = args.database
    cl.campaignName = args.campaign
   
    # Get directory list from sites
    url, files = args.inUrl.rsplit('/', 1)
    logger.info("Crawling %s for %s files", url, files)
    c = Crawl(os.path.join(url, 'catalog.xml'), select=[files], debug=False)

    for d in c.datasets:
        logger.debug('Found %s', d.id)
    
    # Collect the OPeNDAP access URL for each dataset found by the crawl
    urls = [s.get("url") for d in c.datasets for s in d.services
            if s.get("service").lower() == "opendap"]

    pw = lrauvNc4ToNetcdf.InterpolatorWriter()
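Example 2 ends before the `slack` client created earlier is actually used. A hedged sketch of how the notification step might look, using Slacker's documented `chat.post_message()` call; the channel name and message text are illustrative assumptions, not from the original script.

    if args.post:
        # Channel and message are placeholders; post_message() is Slacker's API
        slack.chat.post_message('#lrauvs', 'Finished loading %s' % args.campaign)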