Example #1
def create_snapshots():
    """
    Run requests against USGS API for use in tests.
    """

    api_key = api.login(os.environ['USGS_USERNAME'], os.environ['USGS_PASSWORD'])

    # Dataset Fields
    response = api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key)
    write_response(response, 'dataset-fields.json')

    # Datasets
    response = api.datasets(None, "EE")
    write_response(response, 'datasets.json')

    # Download
    response = api.download("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"], product='STANDARD')
    write_response(response, 'download.json')

    # Download Options
    response = api.download_options("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'download-options.json')

    # Metadata
    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'metadata.json')

    # Search
    response = api.search("LANDSAT_8_C1", "EE", start_date='20170401', end_date='20170402', max_results=10)
    write_response(response, 'search.json')

    api.logout(api_key)
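The write_response helper used by create_snapshots is not shown on this page; below is a minimal sketch of what such a helper might look like, assuming it simply serializes each response dict to a JSON snapshot file in a fixtures directory (the RESPONSES_DIR name and location are assumptions, not taken from the project):

import json
import os

# Hypothetical fixtures directory; the real project may store snapshots elsewhere.
RESPONSES_DIR = os.path.join(os.path.dirname(__file__), 'data')

def write_response(response, filename):
    """Sketch: dump an API response dict to a JSON snapshot file."""
    with open(os.path.join(RESPONSES_DIR, filename), 'w') as f:
        json.dump(response, f, indent=2)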
Example #2
def search_metadata(scene_id):
    # Set the Hyperion and Landsat 8 datasets
    hyperion_dataset = 'EO1_HYP_PUB'
    landsat8_dataset = 'LANDSAT_8'
    # Set the EarthExplorer catalog
    node = 'EE'
    # Submit requests to USGS servers
    return api.metadata(landsat8_dataset, node, [scene_id])
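A hypothetical call to this helper, borrowing the Landsat 8 scene id that appears in the tests further down this page and assuming an API key has already been cached via the usgs CLI login:

# Hypothetical usage; assumes a prior `usgs login` so a cached API key is available.
print(search_metadata('LC80360332014357LGN00'))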
Example #3
File: cli.py Project: HydroLogic/usgs
def metadata(dataset, scene_ids, node, extended, api_key):
    
    if len(scene_ids) == 0:
        scene_ids = map(lambda s: s.strip(), click.open_file('-').readlines()) 
    
    node = get_node(dataset, node)
    data = api.metadata(dataset, node, scene_ids, extended=extended, api_key=api_key)
    print(json.dumps(data))
Example #4
    def meta_data(self, entityId):
        """Get metadata.

        Args:
            entityId (str): Landsat8 product Id.

        """
        return api.metadata(self._dataset, self._node, [entityId],
                            api_key=self.api_key)
Example #5
File: cli.py Project: wildintellect/usgs
def metadata(dataset, scene_ids, node, extended, geojson, api_key):

    if len(scene_ids) == 0:
        scene_ids = map(lambda s: s.strip(), click.open_file('-').readlines())

    node = get_node(dataset, node)
    result = api.metadata(dataset, node, scene_ids, extended=extended, api_key=api_key)

    if geojson:
        result = to_geojson(result)

    click.echo(json.dumps(result))
Example #6
File: test_api.py Project: danlopez00/usgs
def test_metadata():

    expected_keys = [
        "acquisitionDate", "startTime", "endTime", "lowerLeftCoordinate",
        "upperLeftCoordinate", "upperRightCoordinate", "lowerRightCoordinate",
        "sceneBounds", "browseUrl", "dataAccessUrl", "downloadUrl", "entityId",
        "metadataUrl", "modifiedDate", "summary"
    ]

    results = api.metadata("LANDSAT_8", "EE", "LC80360332014357LGN00")
    for item in results:
        for key in expected_keys:
            assert item.get(key) is not None
Example #7
File: cli.py Project: danlopez00/usgs
def metadata(dataset, scene_ids, node, extended, geojson, api_key):
    
    if len(scene_ids) == 0:
        scene_ids = map(lambda s: s.strip(), click.open_file('-').readlines())
    
    node = get_node(dataset, node)
    data = api.metadata(dataset, node, scene_ids, extended=extended, api_key=api_key)
    
    if geojson:
        features = map(to_geojson_feature, data)
        data = { 'type': 'FeatureCollection', 'features': features }
    
    print(json.dumps(data))
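The to_geojson_feature helper used above is not shown; here is a rough sketch of one way it could be written, assuming each metadata record exposes its four corner coordinates as dicts with 'longitude' and 'latitude' keys (an assumption based on the coordinate keys checked by the tests elsewhere on this page, not the library's own implementation):

def to_geojson_feature(record):
    """Sketch: wrap one metadata record as a GeoJSON Feature."""
    corners = ['lowerLeftCoordinate', 'upperLeftCoordinate',
               'upperRightCoordinate', 'lowerRightCoordinate']
    # Assumed structure: each corner is {'longitude': ..., 'latitude': ...}.
    ring = [[record[c]['longitude'], record[c]['latitude']] for c in corners]
    ring.append(ring[0])  # close the polygon
    return {
        'type': 'Feature',
        'geometry': {'type': 'Polygon', 'coordinates': [ring]},
        'properties': {k: v for k, v in record.items() if k not in corners},
    }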
Example #8
def test_metadata():

    expected_keys = [
        "acquisitionDate", "startTime", "endTime",
        "lowerLeftCoordinate", "upperLeftCoordinate",
        "upperRightCoordinate", "lowerRightCoordinate",
        "sceneBounds", "browseUrl", "dataAccessUrl",
        "downloadUrl", "entityId", "metadataUrl",
        "modifiedDate", "summary"
    ]

    results = api.metadata("LANDSAT_8", "EE", "LC80360332014357LGN00")
    for item in results:
        for key in expected_keys:
            assert item.get(key) is not None
Example #9
def test_metadata():

    expected_keys = [
        "acquisitionDate", "startTime", "endTime", "lowerLeftCoordinate",
        "upperLeftCoordinate", "upperRightCoordinate", "lowerRightCoordinate",
        "sceneBounds", "browseUrl", "dataAccessUrl", "downloadUrl", "entityId",
        "metadataUrl", "modifiedDate", "summary"
    ]

    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    assert check_root_keys(response)

    for item in response['data']:
        for key in expected_keys:
            assert item.get(key) is not None
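The check_root_keys helper used in this test (and in Example #10) is not shown; a minimal sketch of what it might verify, assuming the response envelope carries the usual top-level keys of the USGS JSON API ('errorCode', 'error', 'data'; this key set is an assumption, not something shown on this page):

def check_root_keys(response):
    """Sketch: confirm the expected top-level keys of a USGS API response."""
    # The expected key set is an assumption about the response envelope.
    expected_root_keys = {'errorCode', 'error', 'data'}
    return expected_root_keys.issubset(response.keys())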
Example #10
File: test_api.py Project: mapbox/usgs
def test_metadata():

    expected_keys = [
        "acquisitionDate", "startTime", "endTime",
        "lowerLeftCoordinate", "upperLeftCoordinate",
        "upperRightCoordinate", "lowerRightCoordinate",
        "sceneBounds", "browseUrl", "dataAccessUrl",
        "downloadUrl", "entityId", "metadataUrl",
        "modifiedDate", "summary"
    ]

    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    assert check_root_keys(response)

    for item in response['data']:
        for key in expected_keys:
            assert item.get(key) is not None
Example #11
def create_snapshots():
    """
    Run requests against USGS API for use in tests.
    """

    api_key = api.login(os.environ['USGS_USERNAME'],
                        os.environ['USGS_PASSWORD'])

    # Dataset Fields
    response = api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key)
    write_response(response, 'dataset-fields.json')

    # Datasets
    response = api.datasets(None, "EE")
    write_response(response, 'datasets.json')

    # Download
    response = api.download("LANDSAT_8_C1",
                            "EE", ["LC80810712017104LGN00"],
                            product='STANDARD')
    write_response(response, 'download.json')

    # Download Options
    response = api.download_options("LANDSAT_8_C1", "EE",
                                    ["LC80810712017104LGN00"])
    write_response(response, 'download-options.json')

    # Metadata
    response = api.metadata("LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"])
    write_response(response, 'metadata.json')

    # Search
    response = api.search("LANDSAT_8_C1",
                          "EE",
                          start_date='20170401',
                          end_date='20170402',
                          max_results=10)
    write_response(response, 'search.json')

    api.logout(api_key)
Example #12
    def download_preview(self, out_dir=tempfile.gettempdir(), download=True):
        """ Download preview with cloud coverage in the filename.

        Args:
            out_dir (str): The target directory; defaults to the system temp dir.
            download (bool): Whether or not to download the preview.

        """
        results_all = self.query()
        for y in results_all:
            t, result_tile = y
            for result in result_tile:
                entityId = result['entityId']
                browseUrl = result['browseUrl']
                displayId = result['displayId']

                acquisitionDate = result['acquisitionDate']
                # May find more than one for each tile, so make sure to
                # download the one matching the tile name.
                if displayId.split('_')[1][1:] != t:
                    continue
                # yield result['downloadUrl']
                if download:
                    # get cloud cover from metadata
                    meta = api.metadata(self._dataset,
                                        self._node, [entityId],
                                        api_key=self.api_key)
                    cloud_cover = meta['data'][0]['metadataFields'][4]['value']
                    # download
                    # print("Downloading preview from {}.".format(browseUrl))
                    download_one_by_requests_basic(
                        browseUrl,
                        os.path.join(
                            out_dir, "S2_CC{0:02.0f}_T{1}_D{2}.jpg".format(
                                float(cloud_cover), t,
                                acquisitionDate.replace('_', ''))))
Example #13
from usgs import api
# First register an account on USGS EROS, then install the usgs package with pip, and log in with the usgs CLI using that account.
# $ pip install usgs
# $ usgs login [USGS username] [USGS password]
# Set the EarthExplorer catalog
node = 'EE'

# Set the Hyperion dataset
hyperion_dataset = 'EO1_HYP_PUB'

# Set the scene ids
hyperion_scene_id = 'EO1H1820422014302110K2_SG1_01'

# Submit requests to USGS servers
api.metadata(hyperion_dataset, node, [hyperion_scene_id])
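The comments above assume you have already registered a USGS EROS account, installed the package with pip, and logged in once with the usgs CLI so an API key is cached. A short sketch of inspecting the returned metadata, assuming the response follows the {'data': [...]} envelope seen in Examples #9 and #10:

response = api.metadata(hyperion_dataset, node, [hyperion_scene_id])
for record in response.get('data', []):
    # Print a few of the fields that the tests on this page also check.
    print(record.get('entityId'), record.get('acquisitionDate'), record.get('summary'))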
Example #14
def main(argsIn):  #pylint: disable=R0914,R0912

    try:

        usage = "usage: fetch_hdds_images.py [options]"
        parser = argparse.ArgumentParser(usage=usage)

        parser.add_argument("--output-folder",
                            dest="output_folder",
                            required=True,
                            help="Download files to this folder.")

        parser.add_argument("--user",
                            dest="user",
                            required=True,
                            help="User name for EarthExplorer website.")
        parser.add_argument("--password",
                            dest="password",
                            required=True,
                            help="Password name for EarthExplorer website.")

        parser.add_argument(
            "--force-login",
            action="store_true",
            dest="force_login",
            default=False,
            help="Don't reuse the cached EE API key if present.")

        parser.add_argument("--refetch-datasets",
                            action="store_true",
                            dest="refetch_datasets",
                            default=False,
                            help="Force a refetch of the dataset list.")

        parser.add_argument(
            "--refetch-scenes",
            action="store_true",
            dest="refetch_scenes",
            default=False,
            help="Force refetches of scene lists for each dataset.")

        parser.add_argument(
            "--image-list-path",
            dest="image_list_path",
            default=None,
            help=
            "Path to text file containing list of image IDs to download, one per line."
        )

        parser.add_argument("--event-name",
                            dest="event_name",
                            default=None,
                            help="Only download images from this event.")

        options = parser.parse_args(argsIn)

    except argparse.ArgumentError:
        print(usage)
        return -1

    if options.output_folder and not os.path.exists(options.output_folder):
        os.mkdir(options.output_folder)

    images_to_use = []
    if options.image_list_path:
        with open(options.image_list_path, 'r') as f:
            for line in f:
                images_to_use.append(line.strip())

    # Only log in if our session expired (ugly function use to check!)
    if options.force_login or (not api._get_api_key(None)):  #pylint: disable=W0212
        print('Logging in to USGS EarthExplorer...')
        api.login(options.user, options.password, save=True, catalogId=CATALOG)  #pylint: disable=W0612

        print(api._get_api_key(None))  #pylint: disable=W0212
        raise Exception('DEBUG')

    # Retrieve all of the available datasets
    dataset_list = get_dataset_list(options)

    print('Found ' + str(len(dataset_list)) + ' usable datasets.')
    #raise Exception('debug')

    # Don't think we need to do this!
    #get_dataset_fields(dataset_list)

    # TODO: Work through some errors.
    counter = 0
    for (dataset, full_name) in dataset_list:
        counter = counter + 1
        #if counter == 1:
        #    continue

        if options.event_name:  # Only download images from the specified event
            if options.event_name.lower() not in full_name.lower():
                continue

        dataset_folder = os.path.join(options.output_folder, full_name)
        scene_list_path = os.path.join(dataset_folder, 'scene_list.dat')
        done_flag_path = os.path.join(dataset_folder, 'done.flag')
        if not os.path.exists(dataset_folder):
            os.mkdir(dataset_folder)

        if os.path.exists(done_flag_path) and not options.refetch_scenes:
            print('Skipping completed dataset ' + full_name)
            continue

        print('--> Search scenes for: ' + full_name)

        BATCH_SIZE = 10000
        if not os.path.exists(scene_list_path) or options.refetch_scenes:
            # Request the scene list from USGS
            #details = {'Agency - Platform - Vendor':'WORLDVIEW', 'Sensor Type':'MS'}
            #details = {'sensor_type':'MS'}
            details = {}  # TODO: How do these work??

            # Large sets of results require multiple queries in order to get all of the data
            done = False
            error = False
            all_scenes = []  # Accumulate all scene data here
            while not done:
                print('Searching with start offset = ' + str(len(all_scenes)))
                results = api.search(dataset,
                                     CATALOG,
                                     where=details,
                                     max_results=BATCH_SIZE,
                                     starting_number=len(all_scenes),
                                     extended=False)

                if 'results' not in results['data']:
                    print('ERROR: Failed to get any results for dataset: ' +
                          full_name)
                    error = True
                    break
                if len(results['data']['results']) < BATCH_SIZE:
                    done = True
                all_scenes += results['data']['results']

            if error:
                continue

            results['data']['results'] = all_scenes

            # Cache the results to disk
            with open(scene_list_path, 'wb') as f:
                pickle.dump(results, f)

        else:  # Load the results from the cache file
            with open(scene_list_path, 'rb') as f:
                results = pickle.load(f)

        print('Got ' + str(len(results['data']['results'])) +
              ' scene results.')

        for scene in results['data']['results']:

            fail = False
            REQUIRED_PARTS = ['displayId', 'summary', 'entityId']
            for p in REQUIRED_PARTS:
                if (p not in scene) or (not scene[p]):
                    print('scene object is missing element: ' + p)
                    print(scene)
                    fail = True
            if fail:
                continue

            # If image list was provided skip other image names
            if images_to_use and (scene['displayId'] not in images_to_use):
                continue

            # Figure out the downloaded file path for this image
            file_name = scene['displayId'] + '.zip'
            output_path = os.path.join(dataset_folder, file_name)
            if not os.path.exists(dataset_folder):
                os.mkdir(dataset_folder)
            if os.path.exists(output_path):
                continue  # Already have the file!

            # Check if this is one of the sensors we are interested in.
            DESIRED_SENSORS = [('worldview', 'hp'),
                               ('worldview', 'msi')]  # TODO: Add more
            parts = scene['summary'].lower().split(',')
            platform = None
            sensor = None
            for part in parts:
                if 'platform:' in part:
                    platform = part.split(':')[1].strip()
                if 'sensor:' in part:
                    sensor = part.split(':')[1].strip()
            if (not platform) or (not sensor):
                raise Exception('Unknown sensor: ' + scene['summary'])
            if (platform, sensor) not in DESIRED_SENSORS:
                print((platform, sensor))
                print('Undesired sensor: ' + scene['summary'])
                continue

            # Investigate the number of bands
            PLATFORM_BAND_COUNTS = {'worldview': 8, 'TODO': 1}
            min_num_bands = PLATFORM_BAND_COUNTS[platform]
            num_bands = None
            try:
                meta = api.metadata(dataset, CATALOG, scene['entityId'])
            except json.decoder.JSONDecodeError:
                print('Error fetching metadata for dataset = ' + dataset +
                      ', entity = ' + scene['entityId'])
                continue
            try:
                for m in meta['data'][0]['metadataFields']:
                    if m['fieldName'] == 'Number of bands':
                        num_bands = int(m['value'])
                        break
                if not num_bands:
                    raise KeyError()  # Treat like the except case
                if num_bands < min_num_bands:
                    print('Skipping %s, too few bands: %d' %
                          (scene['displayId'], num_bands))
                    continue
            except KeyError:
                print('Unable to perform metadata check!')
                print(meta)

            # Make sure we know which file option to download
            try:
                types = api.download_options(dataset, CATALOG,
                                             scene['entityId'])
            except json.decoder.JSONDecodeError:
                print('Error decoding download options!')
                continue

            if not types['data'] or not types['data'][0]:
                raise Exception('Need to handle types: ' + str(types))
            ready = False
            download_type = 'STANDARD'  # TODO: Does this ever change?
            for o in types['data'][0]['downloadOptions']:
                if o['available'] and o['downloadCode'] == download_type:
                    ready = True
                    break
            if not ready:
                raise Exception('Missing download option for scene: ' +
                                str(types))

            # Get the download URL of the file we want.
            r = api.download(dataset,
                             CATALOG, [scene['entityId']],
                             product=download_type)
            try:
                url = r['data'][0]['url']
            except Exception as e:
                raise Exception('Failed to get download URL from result: ' +
                                str(r)) from e

            print(scene['summary'])
            # Finally download the data!
            cmd = ('wget "%s" --user %s --password %s -O %s' %
                   (url, options.user, options.password, output_path))
            print(cmd)
            os.system(cmd)

            #raise Exception('DEBUG')

        print('Finished processing dataset: ' + full_name)
        os.system('touch ' + done_flag_path)  # Mark this dataset as finished
        #raise Exception('DEBUG')

        #if not os.path.exists(output_path):
        #    raise Exception('Failed to download file ' + output_path)

    print('Finished downloading HDDS files.')
    # Can just let this time out
    #api.logout()

    return 0
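One design note on the download step in this example: building the wget invocation as a shell string for os.system breaks if the URL contains shell metacharacters. A hedged alternative passes an argument list to subprocess, using the same wget flags as the original (a sketch, not the project's own code):

import subprocess

def download_with_wget(url, user, password, output_path):
    """Sketch: equivalent wget call without building a shell string."""
    subprocess.run(
        ['wget', url, '--user', user, '--password', password,
         '-O', output_path],
        check=True)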