# Beispiel #1 (scraped example separator; vote count: 0)
# Make the parent directory importable so the CANON package can be found.
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)  # So that CANON is found

from LakeMichigan import LakeMILoader
from loaders import FileNotFound
from thredds_crawler.crawl import Crawl
from thredds_crawler.etree import etree

# X3D viewpoint/terrain parameters for the 10x vertically exaggerated
# Lake Michigan scene served from stoqs.mbari.org.
_x3d_terrains = {
    'http://stoqs.mbari.org/x3d/michigan_lld_10x/michigan_lld_10x_src_scene.x3d': {
        'position': '277414.36721 -5207201.16684 4373105.96194',
        'orientation': '0.99821 -0.05662 0.01901 1.48579',
        'centerOfRotation': '281401.0288298179 -4639090.577582279 4354217.4974804',
        'VerticalExaggeration': '10',
        'speed': '0.1',
    }
}

# Campaign loader for the 2016 LRAUV experiment database.
cl = LakeMILoader(
    'stoqs_michigan2016',
    'Lake Michigan LRAUV Experiment 2016',
    description='LRAUV 2016 Experiment in Lake Michigan',
    x3dTerrains=_x3d_terrains,
    grdTerrain=os.path.join(parentDir, 'michigan_lld.grd'),
)

# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
#
startdate = datetime.datetime(2016, 7, 24)  # Fixed start
enddate = datetime.datetime(2016, 8, 24)  # Fixed end

# default location of thredds and dods data:
# Beispiel #2 (scraped example separator; vote count: 0)
        # No --start supplied on the command line: leave the load unbounded
        start = None

    # Parse the optional --end argument (format YYYYmmddTHHMMSS) into a datetime
    if args.end is not None:
        dt = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
        end = dt
    else:
        end = None

    # CANON loader keyed to the target database and campaign name
    cl = CANONLoader(args.database, args.campaign)

    # Optional Slack notification client; requires SLACKTOKEN in the environment
    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)

    # Assume that the database has already been created with description and terrain information, so use minimal arguments in constructor
    lm = LakeMILoader(args.database, args.campaign)
    lm.dbAlias = args.database
    lm.campaignName = args.campaign
   
    # Get directory list from sites
    # rsplit once on '/': tail is the filename glob, head is the catalog base URL
    s = args.inUrl.rsplit('/',1)
    files = s[1]
    url = s[0]
    logger.info("Crawling %s for %s files", url, files)
    c = Crawl(os.path.join(url, 'catalog.xml'), select=[files], debug=False)

    for d in c.datasets:
        logger.debug('Found %s', d.id)
    
    # Collect the OPeNDAP access URL of every dataset found by the THREDDS crawl
    urls = [s2.get("url") for d in c.datasets for s2 in d.services if s2.get("service").lower() == "opendap"]
# Beispiel #3 (scraped example separator; vote count: 0)
        # No --start supplied on the command line: leave the load unbounded
        start = None

    # Parse the optional --end argument (format YYYYmmddTHHMMSS) into a datetime
    if args.end is not None:
        dt = datetime.strptime(args.end, '%Y%m%dT%H%M%S')
        end = dt
    else:
        end = None

    # CANON loader keyed to the target database and campaign name
    cl = CANONLoader(args.database, args.campaign)

    # Optional Slack notification client; requires SLACKTOKEN in the environment
    if args.post:
        token = os.environ['SLACKTOKEN']
        slack = Slacker(token)

    # Assume that the database has already been created with description and terrain information, so use minimal arguments in constructor
    lm = LakeMILoader(args.database, args.campaign)
    lm.dbAlias = args.database
    lm.campaignName = args.campaign

    # Get directory list from sites
    # rsplit once on '/': tail is the filename glob, head is the catalog base URL
    s = args.inUrl.rsplit('/', 1)
    files = s[1]
    url = s[0]
    logger.info("Crawling %s for %s files", url, files)
    c = Crawl(os.path.join(url, 'catalog.xml'), select=[files], debug=False)

    for d in c.datasets:
        logger.debug('Found %s', d.id)

    # Collect dataset service URLs (expression truncated in this scraped excerpt)
    urls = [
        s2.get("url") for d in c.datasets for s2 in d.services
# Beispiel #4 (scraped example separator; vote count: 0)
# Prepend the parent directory so the CANON package resolves at import time.
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)  # So that CANON is found

from LakeMichigan import LakeMILoader
from loaders import FileNotFound
from thredds_crawler.crawl import Crawl
from thredds_crawler.etree import etree

# Campaign loader for the 2016 LRAUV experiment, with X3D viewpoint/terrain
# parameters for the 10x vertically exaggerated Lake Michigan scene.
cl = LakeMILoader(
    'stoqs_michigan2016',
    'Lake Michigan LRAUV Experiment 2016',
    description='LRAUV 2016 Experiment in Lake Michigan',
    x3dTerrains={
        'http://stoqs.mbari.org/x3d/michigan_lld_10x/michigan_lld_10x_src_scene.x3d': {
            'position': '277414.36721 -5207201.16684 4373105.96194',
            'orientation': '0.99821 -0.05662 0.01901 1.48579',
            'centerOfRotation': '281401.0288298179 -4639090.577582279 4354217.4974804',
            'VerticalExaggeration': '10',
            'speed': '0.1',
        }
    },
    grdTerrain=os.path.join(parentDir, 'michigan_lld.grd'),
)

# Set start and end dates for all loads from sources that contain data
# beyond the temporal bounds of the campaign
#
startdate = datetime.datetime(2016, 7, 24)  # Fixed start
enddate = datetime.datetime(2016, 8, 24)  # Fixed end


# default location of thredds and dods data: