Code Example #1
File: get_datasets_in_node.py  Project: mapbox/usgs
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset. Some datasets
    will have an ambiguous node since they exist in more than
    one node.
    """

    # scriptdir is assumed to be defined at module level in the original script
    data_dir = os.path.join(scriptdir, "..", "usgs", "data")

    # Wrap map() in list() so each sequence can be iterated more than once
    # below (in Python 3, map() returns a one-shot iterator).
    cwic = list(map(lambda d: d["datasetName"], api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)['data']))
    ee = list(map(lambda d: d["datasetName"], api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)['data']))
    hdds = list(map(lambda d: d["datasetName"], api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)['data']))
    lpcs = list(map(lambda d: d["datasetName"], api.datasets(None, LPCS_EXPLORER_CATALOG_NODE)['data']))

    # Create mapping from dataset to node
    datasets = {}
    datasets.update({ds: "CWIC" for ds in cwic})
    datasets.update({ds: "EE" for ds in ee})
    datasets.update({ds: "HDDS" for ds in hdds})
    datasets.update({ds: "LPCS" for ds in lpcs})

    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))

    # Find the datasets with ambiguous nodes
    cwic_ee = [ds for ds in cwic if ds in ee]
    cwic_hdds = [ds for ds in cwic if ds in hdds]
    cwic_lpcs = [ds for ds in cwic if ds in lpcs]
    ee_hdds = [ds for ds in ee if ds in hdds]
    ee_lpcs = [ds for ds in ee if ds in lpcs]
    hdds_lpcs = [ds for ds in hdds if ds in lpcs]
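Note on the list() wrapping above: in Python 3, map() returns a one-shot iterator, so without it the dict comprehensions would exhaust cwic, ee, hdds, and lpcs, and every overlap list below them would silently come out empty. A minimal demonstration of the pitfall:

names = map(str.upper, ["cwic", "ee"])
first = {n: True for n in names}   # consumes the iterator
second = [n for n in names]        # [] -- the iterator is already exhausted
assert first == {"CWIC": True, "EE": True} and second == []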
Code Example #2
File: create_snapshots.py  Project: mapbox/usgs
def create_snapshots():
    """
    Run requests against USGS API for use in tests.
    """

    api_key = api.login(os.environ['USGS_USERNAME'], os.environ['USGS_PASSWORD'])

    # Dataset Fields
    response = api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key)
    write_response(response, 'dataset-fields.json')

    # Datasets
    response = api.datasets(None, "EE")
    write_response(response, 'datasets.json')

    # Download
    response = api.download("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"], product='STANDARD')
    write_response(response, 'download.json')

    # Download Options
    response = api.download_options("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'download-options.json')

    # Metadata
    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'metadata.json')

    # Search
    response = api.search("LANDSAT_8_C1", "EE", start_date='20170401', end_date='20170402', max_results=10)
    write_response(response, 'search.json')

    api.logout(api_key)
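The snippet depends on a write_response helper that is not shown. A minimal sketch of what it presumably does, assuming snapshots land as JSON files in a local data directory (only the helper's name comes from the snippet; the path and formatting are assumptions):

import json
import os

RESPONSES_DIR = os.path.join(os.path.dirname(__file__), "data")  # assumed location

def write_response(response, filename):
    # Serialize the API response to a JSON snapshot for use in tests.
    with open(os.path.join(RESPONSES_DIR, filename), "w") as f:
        json.dump(response, f, indent=4)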
Code Example #3
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset. Some datasets
    will have an ambiguous node since they exist in more than
    one node.
    """

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = os.path.join(cur_dir, "..", "data")

    # list() so each sequence survives being iterated more than once
    # (in Python 3, map() returns a one-shot iterator)
    cwic = list(map(lambda d: d["datasetName"],
                    api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)))
    ee = list(map(lambda d: d["datasetName"],
                  api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)))
    hdds = list(map(lambda d: d["datasetName"],
                    api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)))
    lpvs = list(map(lambda d: d["datasetName"],
                    api.datasets(None, LPVS_EXPLORER_CATALOG_NODE)))

    # Create mapping from dataset to node
    datasets = {}
    datasets.update({ds: "cwic" for ds in cwic})
    datasets.update({ds: "ee" for ds in ee})
    datasets.update({ds: "hdds" for ds in hdds})
    datasets.update({ds: "lpvs" for ds in lpvs})

    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))

    # Luckily there are only four nodes.

    # Find the datasets with ambiguous nodes
    cwic_ee = [ds for ds in cwic if ds in ee]
    cwic_hdds = [ds for ds in cwic if ds in hdds]
    cwic_lpvs = [ds for ds in cwic if ds in lpvs]

    ee_hdds = [ds for ds in ee if ds in hdds]
    ee_lpvs = [ds for ds in ee if ds in lpvs]

    hdds_lpvs = [ds for ds in hdds if ds in lpvs]
Code Example #4
File: test_api.py  Project: danlopez00/usgs
def test_datasets():

    expected_keys = [
        "bounds", "datasetName", "datasetFullName", "endDate", "startDate",
        "supportDownload", "supportBulkDownload", "bulkDownloadOrderLimit",
        "supportOrder", "orderLimit", "totalScenes"
    ]

    results = api.datasets(None, "EE")
    for item in results:
        for key in expected_keys:
            assert item.get(key) is not None
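The test can be run on its own; a minimal invocation, assuming the file lives at test_api.py (equivalent to running "pytest test_api.py -k test_datasets" from a shell):

import pytest

pytest.main(["test_api.py", "-k", "test_datasets"])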
Code Example #5
File: test_api.py  Project: jonas-eberle/usgs
def test_datasets():

    expected_keys = [
        "bounds", "datasetName", "datasetFullName",
        "endDate", "startDate", "supportDownload",
        "supportBulkDownload", "bulkDownloadOrderLimit",
        "supportOrder", "orderLimit", "totalScenes"
    ]

    results = api.datasets(None, "EE")
    for item in results:
        for key in expected_keys:
            assert item.get(key) is not None
Code Example #6
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset. Some datasets
    will have an ambiguous node since they exist in more than
    one node.
    """
    
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    data_dir = os.path.join(cur_dir, "..", "data")
    
    # list() so each sequence survives being iterated more than once
    # (in Python 3, map() returns a one-shot iterator)
    cwic = list(map(lambda d: d["datasetName"], api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)))
    ee = list(map(lambda d: d["datasetName"], api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)))
    hdds = list(map(lambda d: d["datasetName"], api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)))
    lpvs = list(map(lambda d: d["datasetName"], api.datasets(None, LPVS_EXPLORER_CATALOG_NODE)))
    
    # Create mapping from dataset to node
    datasets = {}
    datasets.update({ds: "cwic" for ds in cwic})
    datasets.update({ds: "ee" for ds in ee})
    datasets.update({ds: "hdds" for ds in hdds})
    datasets.update({ds: "lpvs" for ds in lpvs})
    
    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))
    
    # Luckily there are only four nodes.
    
    # Find the datasets with ambiguous nodes
    cwic_ee = [ds for ds in cwic if ds in ee]
    cwic_hdds = [ds for ds in cwic if ds in hdds]
    cwic_lpvs = [ds for ds in cwic if ds in lpvs]
    
    ee_hdds = [ds for ds in ee if ds in hdds]
    ee_lpvs = [ds for ds in ee if ds in lpvs]
    
    hdds_lpvs = [ds for ds in hdds if ds in lpvs]
Code Example #7
def get_datasets_in_nodes():
    """
    Get the node associated with each dataset. Some datasets
    will have an ambiguous node since they exist in more than
    one node.
    """

    # scriptdir is assumed to be defined at module level in the original script
    data_dir = os.path.join(scriptdir, "..", "usgs", "data")

    # list() so each sequence survives being iterated more than once
    # (in Python 3, map() returns a one-shot iterator)
    cwic = list(map(lambda d: d["datasetName"],
                    api.datasets(None, CWIC_LSI_EXPLORER_CATALOG_NODE)['data']))
    ee = list(map(lambda d: d["datasetName"],
                  api.datasets(None, EARTH_EXPLORER_CATALOG_NODE)['data']))
    hdds = list(map(lambda d: d["datasetName"],
                    api.datasets(None, HDDS_EXPLORER_CATALOG_NODE)['data']))
    lpcs = list(map(lambda d: d["datasetName"],
                    api.datasets(None, LPCS_EXPLORER_CATALOG_NODE)['data']))

    # Create mapping from dataset to node
    datasets = {}
    datasets.update({ds: "CWIC" for ds in cwic})
    datasets.update({ds: "EE" for ds in ee})
    datasets.update({ds: "HDDS" for ds in hdds})
    datasets.update({ds: "LPCS" for ds in lpcs})

    datasets_path = os.path.join(data_dir, "datasets.json")
    with open(datasets_path, "w") as f:
        f.write(json.dumps(datasets))

    # Find the datasets with ambiguous nodes
    cwic_ee = [ds for ds in cwic if ds in ee]
    cwic_hdds = [ds for ds in cwic if ds in hdds]
    cwic_lpcs = [ds for ds in cwic if ds in lpcs]
    ee_hdds = [ds for ds in ee if ds in hdds]
    ee_lpcs = [ds for ds in ee if ds in lpcs]
    hdds_lpcs = [ds for ds in hdds if ds in lpcs]
Code Example #8
def create_snapshots():
    """
    Run requests against USGS API for use in tests.
    """

    api_key = api.login(os.environ['USGS_USERNAME'],
                        os.environ['USGS_PASSWORD'])

    # Dataset Fields
    response = api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key)
    write_response(response, 'dataset-fields.json')

    # Datasets
    response = api.datasets(None, "EE")
    write_response(response, 'datasets.json')

    # Download
    response = api.download("LANDSAT_8_C1",
                            "EE", ["LC80810712017104LGN00"],
                            product='STANDARD')
    write_response(response, 'download.json')

    # Download Options
    response = api.download_options("LANDSAT_8_C1", "EE",
                                    ["LC80810712017104LGN00"])
    write_response(response, 'download-options.json')

    # Metadata
    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'metadata.json')

    # Search
    response = api.search("LANDSAT_8_C1",
                          "EE",
                          start_date='20170401',
                          end_date='20170402',
                          max_results=10)
    write_response(response, 'search.json')

    api.logout(api_key)
Code Example #9
File: fetch_hdds_images.py  Project: mvonpohle/delta
def get_dataset_list(options):
    """Return a list of all available HDDS datasets, each entry is (datasetName, pretty name)"""

    dataset_cache_path = os.path.join(options.output_folder,
                                      'dataset_list.csv')
    name_list = []
    if not os.path.exists(dataset_cache_path) or options.refetch_datasets:

        # Each event is a dataset, start by fetching the list of all HDDS datasets.
        print('Submitting HDDS dataset query...')
        results = api.datasets("", CATALOG)
        print(results)
        if not results['data']:
            raise Exception('Did not find any HDDS data!')
        print('Found ' + str(len(results['data'])) + ' matching datasets.')

        # Go through all the datasets and identify the events we are interested in.
        TARGET_TYPES = [
            'flood', 'hurricane', 'cyclone', 'tsunami', 'dam_collapse', 'storm'
        ]
        SKIP = ['test', 'icestorm', 'snowstorm', 'adhoc', 'ad hoc',
                'ad_hoc']  # TODO: What is ad hoc here?

        handle = open(dataset_cache_path, 'w')

        for ds in results['data']:

            #print('Found match: ' + ds['datasetFullName'])
            full_name = ds['datasetFullName'].lower()

            bad = False
            for s in SKIP:
                if s in full_name:
                    bad = True
                    break
            if bad:
                continue

            target = False
            for t in TARGET_TYPES:
                if t in full_name:
                    target = True
                    break
            if not target:
                #print('Not a target!')
                continue

            if not ds['supportDownload']:
                #print('Downloads not supported!')
                continue

            print(ds['datasetName'] + ',' + full_name)
            handle.write(ds['datasetName'] + ',' + ds['datasetFullName'] +
                         '\n')
            name_list.append((ds['datasetName'], ds['datasetFullName']))
        handle.close()

    else:
        # Cache exists, load the dataset list from the cache
        with open(dataset_cache_path, 'r') as handle:
            for line in handle:
                parts = line.strip().split(',')
                print(parts)
                name_list.append(parts)

    return name_list
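get_dataset_list only reads output_folder and refetch_datasets from its options argument. A hypothetical caller, assuming an argparse-style interface (the attribute names come from the snippet; the parser and defaults are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--output-folder', dest='output_folder', default='hdds_output')
parser.add_argument('--refetch-datasets', dest='refetch_datasets', action='store_true')
options = parser.parse_args()

name_list = get_dataset_list(options)  # list of (datasetName, datasetFullName) pairs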
Code Example #10
File: cli.py  Project: HydroLogic/usgs
def datasets(node):
    data = api.datasets(None, node)
    print(json.dumps(data))
Code Example #11
def datasets(node, start_date, end_date):
    data = api.datasets(None, node, start_date=start_date, end_date=end_date)
    click.echo(json.dumps(data))
Code Example #12
File: cli.py  Project: mapbox/usgs
def datasets(node, start_date, end_date):
    data = api.datasets(None, node, start_date=start_date, end_date=end_date)
    click.echo(json.dumps(data))
Code Example #13
File: cli.py  Project: danlopez00/usgs
def datasets(node, start_date, end_date):
    data = api.datasets(None, node, start_date=start_date, end_date=end_date)
    print(json.dumps(data))
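Examples #10 through #13 are click-style command bodies whose decorators were stripped during extraction. A plausible reconstruction of the start_date/end_date variant, assuming option names that mirror the parameters (the decorators are an assumption, not any project's exact code):

import json

import click
from usgs import api

@click.command()
@click.argument('node')
@click.option('--start-date', 'start_date', default=None,
              help='Filter datasets by start date.')
@click.option('--end-date', 'end_date', default=None,
              help='Filter datasets by end date.')
def datasets(node, start_date, end_date):
    data = api.datasets(None, node, start_date=start_date, end_date=end_date)
    click.echo(json.dumps(data))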