def usgs_eros(self, scene, path):
    """Download one Landsat-8 scene from USGS EarthExplorer.

    Requires ``self.usgs_user`` / ``self.usgs_pass`` credentials; raises
    RemoteFileDoesntExist when the scene cannot be obtained, and
    USGSInventoryAccessMissing when the USGS login is rejected.
    """
    # Without credentials we cannot fall back to EarthExplorer at all.
    if not (self.usgs_user and self.usgs_pass):
        raise RemoteFileDoesntExist(
            '%s is not available on AWS S3 or Google Storage' % scene)

    try:
        api_key = api.login(self.usgs_user, self.usgs_pass)
    except USGSError as e:
        # USGS reports failures as a SOAP fault document; surface its text.
        fault = ElementTree.fromstring(str(e.message)).find(
            "SOAP-ENV:Body/SOAP-ENV:Fault/faultstring", api.NAMESPACES)
        raise USGSInventoryAccessMissing(fault.text)

    urls = api.download('LANDSAT_8', 'EE', [scene], api_key=api_key)
    if not urls:
        raise RemoteFileDoesntExist(
            '%s is not available on AWS S3, Google or USGS Earth Explorer'
            % scene)

    self.output('Source: USGS EarthExplorer', normal=True, arrow=True)
    return self.fetch(urls[0], path)
# --- Beispiel #2 (Example #2, score 0): next scraped snippet ---
def download_url(dataset, scene_ids, product, node, api_key):
    """Resolve the catalog node for *dataset* and print one download URL
    per line for the requested scene IDs."""
    resolved_node = get_node(dataset, node)
    urls = api.download(dataset, resolved_node, scene_ids, product)
    for url in urls:
        click.echo(url)
# --- Beispiel #3 (Example #3, score 0): next scraped snippet ---
def create_snapshots():
    """Run live requests against the USGS API and write each response to a
    JSON snapshot file for use in tests.

    Requires USGS_USERNAME / USGS_PASSWORD in the environment.
    """
    api_key = api.login(os.environ['USGS_USERNAME'], os.environ['USGS_PASSWORD'])

    # Same scene ID is used for every per-scene endpoint below.
    scene = ["LC80810712017104LGN00"]

    # Dataset Fields
    write_response(api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key),
                   'dataset-fields.json')

    # Datasets
    write_response(api.datasets(None, "EE"), 'datasets.json')

    # Download
    write_response(api.download("LANDSAT_8_C1", "EE", scene, product='STANDARD'),
                   'download.json')

    # Download Options
    write_response(api.download_options("LANDSAT_8_C1", "EE", scene),
                   'download-options.json')

    # Metadata
    write_response(api.metadata("LANDSAT_8_C1", "EE", scene), 'metadata.json')

    # Search
    write_response(api.search("LANDSAT_8_C1", "EE", start_date='20170401',
                              end_date='20170402', max_results=10),
                   'search.json')

    api.logout(api_key)
    def usgs(self, scenes):
        """Download Landsat-8 scenes from USGS EarthExplorer.

        :param scenes: list of scene-ID strings to download.
        :returns: a Scenes collection with the fetched files attached.
        :raises Exception: when *scenes* is not a list.
        :raises USGSInventoryAccessMissing: when the USGS login fails.
        :raises RemoteFileDoesntExist: when a scene is unavailable or no
            credentials are configured.
        """
        if not isinstance(scenes, list):
            raise Exception('Expected sceneIDs list')

        scene_objs = Scenes()

        # download from usgs if login information is provided
        if self.usgs_user and self.usgs_pass:
            try:
                api_key = api.login(self.usgs_user, self.usgs_pass)
            except USGSError as e:
                # USGS reports failures as a SOAP fault document.
                error_tree = ElementTree.fromstring(str(e.message))
                error_text = error_tree.find("SOAP-ENV:Body/SOAP-ENV:Fault/faultstring", api.NAMESPACES).text
                raise USGSInventoryAccessMissing(error_text)

            for scene in scenes:
                download_urls = api.download('LANDSAT_8', 'EE', [scene], api_key=api_key)
                if download_urls:
                    logger.info('Source: USGS EarthExplorer')
                    scene_objs.add_with_files(scene, fetch(download_urls[0], self.download_dir))

                else:
                    # BUG FIX: `scene` is a single ID string here; the original
                    # `' - '.join(scene)` interleaved ' - ' between every
                    # character of the ID in the error message.
                    raise RemoteFileDoesntExist('{0} not available on AWS S3, Google or USGS Earth Explorer'.format(scene))

            return scene_objs

        raise RemoteFileDoesntExist('{0} not available on AWS S3 or Google Storage'.format(' - '.join(scenes)))
# --- Beispiel #5 (Example #5, score 0): next scraped snippet ---
def fetch_dswe_images(date, ll_coord, ur_coord, output_folder, user, password,
                      force_login):
    """Download all DSWE images that fit the given criteria to the output folder
       if they are not already present.  The coordinates must be in lon/lat degrees.

    :param date: search date used for both start and end of the query.
    :param ll_coord: (lon, lat) of the lower-left corner.
    :param ur_coord: (lon, lat) of the upper-right corner.
    :param output_folder: directory that receives the downloaded .tar files.
    :param user: EarthExplorer user name.
    :param password: EarthExplorer password.
    :param force_login: when True, log in even if a cached API key exists.
    :raises Exception: when the search returns nothing, a download URL is
        missing, or a downloaded file does not appear on disk.
    """
    import subprocess  # local import keeps this self-contained snippet runnable

    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    CATALOG = 'EE'
    DATASET = 'SP_TILE_DSWE'

    # Only log in if our session expired (ugly function use to check!)
    if force_login or (not api._get_api_key(None)):  #pylint: disable=W0212
        print('Logging in to USGS EarthExplorer...')
        api.login(user, password, save=True, catalogId=CATALOG)

    print('Submitting EarthExplorer query...')
    results = api.search(DATASET,
                         CATALOG,
                         where={},
                         start_date=date,
                         end_date=date,
                         ll={'longitude': ll_coord[0], 'latitude': ll_coord[1]},
                         ur={'longitude': ur_coord[0], 'latitude': ur_coord[1]},
                         max_results=12,
                         extended=False)

    if not results['data']:
        raise Exception(
            'Did not find any DSWE data that matched the Landsat file!')
    print('Found ' + str(len(results['data']['results'])) + ' matching files.')

    for scene in results['data']['results']:
        print('Found match: ' + scene['entityId'])

        fname = scene['entityId'] + '.tar'
        output_path = os.path.join(output_folder, fname)

        if os.path.exists(output_path):
            print('Already have image on disk!')
            continue

        r = api.download(DATASET, CATALOG, [scene['entityId']], product='DSWE')
        print(r)
        if not r['data']:
            raise Exception('Failed to get download URL!')
        url = r['data'][0]['url']

        # SECURITY FIX: pass arguments as a list with no shell so the URL and
        # credentials cannot be interpreted by a shell, and do not echo the
        # password to stdout (the original printed the full wget command line).
        print('Downloading ' + url + ' -> ' + output_path)
        subprocess.run(['wget', url, '--user', user, '--password', password,
                        '-O', output_path], check=False)

        if not os.path.exists(output_path):
            raise Exception('Failed to download file ' + output_path)

    print('Finished downloading DSWE files.')
def get_download_url(scene_root, verbose):
    """Return the USGS EarthExplorer 'STANDARD' download URL for one
    Landsat-8 scene.

    Logs in with USGS_USERID / USGS_PASSWORD from the environment when a
    password is present; otherwise attempts an unauthenticated request.

    :param scene_root: Landsat scene (entity) ID.
    :param verbose: print progress information when truthy.
    :returns: the first download URL returned by the API.
    """
    # BUG FIX: initialise api_key so the download call below is well-defined
    # even when no credentials are configured.
    api_key = None
    if 'USGS_PASSWORD' in os.environ:
        if verbose:
            # FIX: Python 3 print calls (the original used py2 print statements
            # in an otherwise Python 3 file).
            print('logging in...')
        api_key = api.login(os.environ['USGS_USERID'],
                            os.environ['USGS_PASSWORD'])
        if verbose:
            print('  api_key = %s' % api_key)

    # BUG FIX: pass the key we just obtained; the original discarded it, so
    # the download request ran without the fresh credentials.
    urls = api.download('LANDSAT_8', 'EE', [scene_root], 'STANDARD',
                        api_key=api_key)
    return urls[0]
    def usgs_eros(self, scene, path):
        """Fetch a single Landsat-8 scene from USGS EarthExplorer into *path*.

        Raises RemoteFileDoesntExist when the scene cannot be obtained and
        USGSInventoryAccessMissing when the USGS login is rejected.
        """
        # Credentials are mandatory for the EarthExplorer fallback.
        if not (self.usgs_user and self.usgs_pass):
            raise RemoteFileDoesntExist('%s is not available on AWS S3 or Google Storage' % scene)

        try:
            api_key = api.login(self.usgs_user, self.usgs_pass)
        except USGSError as e:
            # Extract the human-readable message from the SOAP fault body.
            fault = ElementTree.fromstring(str(e.message)).find(
                "SOAP-ENV:Body/SOAP-ENV:Fault/faultstring", api.NAMESPACES)
            raise USGSInventoryAccessMissing(fault.text)

        urls = api.download('LANDSAT_8', 'EE', [scene], api_key=api_key)
        if not urls:
            raise RemoteFileDoesntExist('%s is not available on AWS S3, Google or USGS Earth Explorer' % scene)

        self.output('Source: USGS EarthExplorer', normal=True, arrow=True)
        return self.fetch(urls[0], path)
# --- Beispiel #8 (Example #8, score 0): next scraped snippet ---
def create_snapshots():
    """Run requests against the USGS API and persist each response as a
    named JSON snapshot for the test suite.

    Requires USGS_USERNAME / USGS_PASSWORD in the environment.
    """
    api_key = api.login(os.environ['USGS_USERNAME'],
                        os.environ['USGS_PASSWORD'])

    ids = ["LC80810712017104LGN00"]

    # Each entry pairs a deferred API call with its snapshot file name;
    # execution order below matches the endpoints' original ordering.
    snapshots = [
        (lambda: api.dataset_fields("LANDSAT_8_C1", "EE", api_key=api_key),
         'dataset-fields.json'),
        (lambda: api.datasets(None, "EE"),
         'datasets.json'),
        (lambda: api.download("LANDSAT_8_C1", "EE", ids, product='STANDARD'),
         'download.json'),
        (lambda: api.download_options("LANDSAT_8_C1", "EE", ids),
         'download-options.json'),
        (lambda: api.metadata("LANDSAT_8_C1", "EE", ids),
         'metadata.json'),
        (lambda: api.search("LANDSAT_8_C1", "EE", start_date='20170401',
                            end_date='20170402', max_results=10),
         'search.json'),
    ]
    for request, filename in snapshots:
        write_response(request(), filename)

    api.logout(api_key)
    def usgs(self, scenes):
        """Download Landsat-8 scenes from USGS EarthExplorer.

        :param scenes: list of scene-ID strings to download.
        :returns: a Scenes collection with the fetched files attached.
        :raises Exception: when *scenes* is not a list.
        :raises USGSInventoryAccessMissing: when the USGS login fails.
        :raises RemoteFileDoesntExist: when a scene is unavailable or no
            credentials are configured.
        """
        if not isinstance(scenes, list):
            raise Exception('Expected sceneIDs list')

        scene_objs = Scenes()

        # download from usgs if login information is provided
        if self.usgs_user and self.usgs_pass:
            try:
                api_key = api.login(self.usgs_user, self.usgs_pass)
            except USGSError as e:
                # USGS reports failures as a SOAP fault document.
                error_tree = ElementTree.fromstring(str(e.message))
                error_text = error_tree.find(
                    "SOAP-ENV:Body/SOAP-ENV:Fault/faultstring",
                    api.NAMESPACES).text
                raise USGSInventoryAccessMissing(error_text)

            for scene in scenes:
                download_urls = api.download('LANDSAT_8',
                                             'EE', [scene],
                                             api_key=api_key)
                if download_urls:
                    logger.info('Source: USGS EarthExplorer')
                    scene_objs.add_with_files(
                        scene, fetch(download_urls[0], self.download_dir))

                else:
                    # BUG FIX: `scene` is a single ID string here; the original
                    # `' - '.join(scene)` interleaved ' - ' between every
                    # character of the ID in the error message.
                    raise RemoteFileDoesntExist(
                        '{0} not available on AWS S3, Google or USGS Earth Explorer'
                        .format(scene))

            return scene_objs

        raise RemoteFileDoesntExist(
            '{0} not available on AWS S3 or Google Storage'.format(
                ' - '.join(scenes)))
def get_download_url(scene_root, verbose):
    """Return the first 'STANDARD' product download URL for *scene_root*
    from the LANDSAT_8 dataset on the EE node.  *verbose* is unused."""
    return api.download('LANDSAT_8', 'EE', [scene_root], 'STANDARD')[0]
# --- Beispiel #11 (Example #11, score 0): next scraped snippet ---
def main(argsIn):  #pylint: disable=R0914,R0912
    """Fetch HDDS imagery from USGS EarthExplorer.

    Parses command-line options, logs in to EarthExplorer (reusing a cached
    API key unless --force-login is given), enumerates the available
    datasets, and for each matching dataset downloads the desired scenes as
    .zip files into per-dataset sub-folders of --output-folder.

    Returns 0 on success, -1 on argument errors.

    NOTE(review): depends on module-level names defined elsewhere in the
    file (CATALOG, get_dataset_list, api, json, pickle, os) — confirm they
    are in scope before extracting this function.
    """

    try:

        usage = "usage: fetch_hdds_images.py [options]"
        parser = argparse.ArgumentParser(usage=usage)

        parser.add_argument("--output-folder",
                            dest="output_folder",
                            required=True,
                            help="Download files to this folder.")

        parser.add_argument("--user",
                            dest="user",
                            required=True,
                            help="User name for EarthExplorer website.")
        parser.add_argument("--password",
                            dest="password",
                            required=True,
                            help="Password name for EarthExplorer website.")

        parser.add_argument(
            "--force-login",
            action="store_true",
            dest="force_login",
            default=False,
            help="Don't reuse the cached EE API key if present.")

        parser.add_argument("--refetch-datasets",
                            action="store_true",
                            dest="refetch_datasets",
                            default=False,
                            help="Force a refetch of the dataset list.")

        parser.add_argument(
            "--refetch-scenes",
            action="store_true",
            dest="refetch_scenes",
            default=False,
            help="Force refetches of scene lists for each dataset.")

        parser.add_argument(
            "--image-list-path",
            dest="image_list_path",
            default=None,
            help=
            "Path to text file containing list of image IDs to download, one per line."
        )

        parser.add_argument("--event-name",
                            dest="event_name",
                            default=None,
                            help="Only download images from this event.")

        options = parser.parse_args(argsIn)

    # NOTE(review): argparse normally exits via SystemExit on bad arguments;
    # this ArgumentError handler is unlikely ever to fire.
    except argparse.ArgumentError:
        print(usage)
        return -1

    if options.output_folder and not os.path.exists(options.output_folder):
        os.mkdir(options.output_folder)

    # Optional whitelist of image display IDs, one per line.
    images_to_use = []
    if options.image_list_path:
        with open(options.image_list_path, 'r') as f:
            for line in f:
                images_to_use.append(line.strip())

    # Only log in if our session expired (ugly function use to check!)
    if options.force_login or (not api._get_api_key(None)):  #pylint: disable=W0212
        print('Logging in to USGS EarthExplorer...')
        api.login(options.user, options.password, save=True, catalogId=CATALOG)  #pylint: disable=W0612

        # NOTE(review): leftover debug code — this prints the API key and then
        # unconditionally aborts the whole run right after a fresh login.
        # It should almost certainly be removed.
        print(api._get_api_key(None))  #pylint: disable=W0212
        raise Exception('DEBUG')

    # Retrieve all of the available datasets
    dataset_list = get_dataset_list(options)

    print('Found ' + str(len(dataset_list)) + ' useable datasets.')
    #raise Exception('debug')

    # Don't think we need to do this!
    #get_dataset_fields(dataset_list)

    # TODO: Work through some errors.
    counter = 0
    for (dataset, full_name) in dataset_list:
        counter = counter + 1
        #if counter == 1:
        #    continue

        if options.event_name:  # Only download images from the specified event
            if options.event_name.lower() not in full_name.lower():
                continue

        # Per-dataset layout: one folder holding a pickled scene list and a
        # 'done.flag' marker so completed datasets are skipped on re-runs.
        dataset_folder = os.path.join(options.output_folder, full_name)
        scene_list_path = os.path.join(dataset_folder, 'scene_list.dat')
        done_flag_path = os.path.join(dataset_folder, 'done.flag')
        if not os.path.exists(dataset_folder):
            os.mkdir(dataset_folder)

        if os.path.exists(done_flag_path) and not options.refetch_scenes:
            print('Skipping completed dataset ' + full_name)
            continue

        print('--> Search scenes for: ' + full_name)

        BATCH_SIZE = 10000
        if not os.path.exists(scene_list_path) or options.refetch_scenes:
            # Request the scene list from USGS
            #details = {'Agency - Platform - Vendor':'WORLDVIEW', 'Sensor Type':'MS'}
            #details = {'sensor_type':'MS'}
            details = {}  # TODO: How do these work??

            # Large sets of results require multiple queries in order to get all of the data
            done = False
            error = False
            all_scenes = []  # Acculumate all scene data here
            while not done:
                print('Searching with start offset = ' + str(len(all_scenes)))
                results = api.search(dataset,
                                     CATALOG,
                                     where=details,
                                     max_results=BATCH_SIZE,
                                     starting_number=len(all_scenes),
                                     extended=False)

                if 'results' not in results['data']:
                    print('ERROR: Failed to get any results for dataset: ' +
                          full_name)
                    error = True
                    break
                # A short batch means we have reached the end of the results.
                if len(results['data']['results']) < BATCH_SIZE:
                    done = True
                all_scenes += results['data']['results']

            if error:
                continue

            results['data']['results'] = all_scenes

            # Cache the results to disk
            with open(scene_list_path, 'wb') as f:
                pickle.dump(results, f)

        else:  # Load the results from the cache file
            with open(scene_list_path, 'rb') as f:
                results = pickle.load(f)

        print('Got ' + str(len(results['data']['results'])) +
              ' scene results.')

        for scene in results['data']['results']:

            # Skip malformed scene records that lack required fields.
            # NOTE(review): 'displayId' appears twice in this list.
            fail = False
            REQUIRED_PARTS = ['displayId', 'summary', 'entityId', 'displayId']
            for p in REQUIRED_PARTS:
                if (p not in scene) or (not scene[p]):
                    print('scene object is missing element: ' + p)
                    print(scene)
                    fail = True
            if fail:
                continue

            # If image list was provided skip other image names
            if images_to_use and (scene['displayId'] not in images_to_use):
                continue

            # Figure out the downloaded file path for this image
            file_name = scene['displayId'] + '.zip'
            output_path = os.path.join(dataset_folder, file_name)
            if not os.path.exists(dataset_folder):
                os.mkdir(dataset_folder)
            if os.path.exists(output_path):
                continue  # Already have the file!

            # Check if this is one of the sensors we are interested in.
            # Platform/sensor are parsed out of the free-text 'summary' field.
            DESIRED_SENSORS = [('worldview', 'hp'),
                               ('worldview', 'msi')]  # TODO: Add more
            parts = scene['summary'].lower().split(',')
            platform = None
            sensor = None
            for part in parts:
                if 'platform:' in part:
                    platform = part.split(':')[1].strip()
                if 'sensor:' in part:
                    sensor = part.split(':')[1].strip()
            if (not platform) or (not sensor):
                raise Exception('Unknown sensor: ' + scene['summary'])
            if (platform, sensor) not in DESIRED_SENSORS:
                print((platform, sensor))
                print('Undesired sensor: ' + scene['summary'])
                continue

            # Investigate the number of bands
            PLATFORM_BAND_COUNTS = {'worldview': 8, 'TODO': 1}
            min_num_bands = PLATFORM_BAND_COUNTS[platform]
            num_bands = None
            try:
                meta = api.metadata(dataset, CATALOG, scene['entityId'])
            except json.decoder.JSONDecodeError:
                print('Error fetching metadata for dataset = ' + dataset +
                      ', entity = ' + scene['entityId'])
                continue
            try:
                for m in meta['data'][0]['metadataFields']:
                    if m['fieldName'] == 'Number of bands':
                        num_bands = int(m['value'])
                        break
                if not num_bands:
                    raise KeyError()  # Treat like the except case
                if num_bands < min_num_bands:
                    print('Skipping %s, too few bands: %d' %
                          (scene['displayId'], num_bands))
                    continue
            # NOTE(review): on a failed metadata check the scene is still
            # downloaded (no 'continue' here) — confirm this is intentional.
            except KeyError:
                print('Unable to perform metadata check!')
                print(meta)

            # Make sure we know which file option to download
            try:
                types = api.download_options(dataset, CATALOG,
                                             scene['entityId'])
            except json.decoder.JSONDecodeError:
                print('Error decoding download options!')
                continue

            if not types['data'] or not types['data'][0]:
                raise Exception('Need to handle types: ' + str(types))
            ready = False
            download_type = 'STANDARD'  # TODO: Does this ever change?
            for o in types['data'][0]['downloadOptions']:
                if o['available'] and o['downloadCode'] == download_type:
                    ready = True
                    break
            if not ready:
                raise Exception('Missing download option for scene: ' +
                                str(types))

            # Get the download URL of the file we want.
            r = api.download(dataset,
                             CATALOG, [scene['entityId']],
                             product=download_type)
            try:
                url = r['data'][0]['url']
            except Exception as e:
                raise Exception('Failed to get download URL from result: ' +
                                str(r)) from e

            print(scene['summary'])
            # Finally download the data!
            # NOTE(review): the password is interpolated into a shell command
            # and echoed to stdout — consider subprocess with a list argument.
            cmd = ('wget "%s" --user %s --password %s -O %s' %
                   (url, options.user, options.password, output_path))
            print(cmd)
            os.system(cmd)

            #raise Exception('DEBUG')

        print('Finished processing dataset: ' + full_name)
        os.system('touch ' + done_flag_path)  # Mark this dataset as finished
        #raise Exception('DEBUG')

        #if not os.path.exists(output_path):
        #    raise Exception('Failed to download file ' + output_path)

    print('Finished downloading HDDS! files.')
    # Can just let this time out
    #api.logout()

    return 0
# --- Beispiel #12 (Example #12, score 0): next scraped snippet ---
def download_url(dataset, scene_ids, product, node, api_key):
    """Resolve the catalog node for *dataset* and print the raw download
    response as a single JSON document."""
    resolved = get_node(dataset, node)
    payload = api.download(dataset, resolved, scene_ids, product)
    print(json.dumps(payload))
# --- Beispiel #13 (Example #13, score 0): next scraped snippet ---
def test_download():
    """The download endpoint returns a well-formed response containing
    exactly one URL record for a single requested scene."""
    scene = ["LC80810712017104LGN00"]
    response = api.download("LANDSAT_8_C1", "EE", scene, product='STANDARD')
    assert check_root_keys(response)
    assert len(response['data']) == 1
def get_download_url(scene_root, verbose):
    """Return the first download URL for *scene_root* (LANDSAT_8 / EE /
    'STANDARD' product).  *verbose* is accepted but unused."""
    candidates = api.download('LANDSAT_8', 'EE', [scene_root], 'STANDARD')
    return candidates[0]
# --- Beispiel #15 (Example #15, score 0): next scraped snippet ---
def test_download():
    """Requesting one scene yields a valid response with one data entry."""
    response = api.download(
        "LANDSAT_8_C1", "EE", ["LC80810712017104LGN00"], product='STANDARD')
    assert check_root_keys(response)
    assert len(response['data']) == 1
# --- Beispiel #16 (Example #16, score 0): next scraped snippet ---
def download_url(dataset, scene_ids, product, node, api_key):
    """Resolve the node alias, request download info for the scenes and
    echo the response as one JSON document."""
    resolved_node = get_node(dataset, node)
    response = api.download(dataset, resolved_node, scene_ids, product)
    click.echo(json.dumps(response))
def dl_landsattiles(lss,
                    dataset,
                    apikey,
                    outdir,
                    mincctile=True,
                    maxcloudcover=100):
    """Download and unpack Landsat tiles listed in a USGS search response.

    :param lss: raw api.search() response; tiles are read from
        ``lss['data']['results']``.
    :param dataset: USGS dataset name, forwarded to api.download.
    :param apikey: EarthExplorer API key.
    :param outdir: directory under which one sub-directory per tile is made.
    :param mincctile: when True, keep only the least-cloudy tile per WRS
        row/path (third underscore-separated field of displayId).
    :param maxcloudcover: skip tiles with cloud cover at or above this value.
    """
    print('Subsetting scenes to keep those with minimum land cover...')
    all_tiles = lss['data']['results']
    if mincctile:
        # Keep, per row/path, the tile with the lowest cloud cover.
        best = {}
        for tile in all_tiles:
            rowpath = tile['displayId'].split('_')[2]
            current = best.get(rowpath)
            if current is None or (float(tile['cloudCover']) <
                                   float(current['cloudCover'])):
                best[rowpath] = tile
        tiles = best.values()
    else:
        # BUG FIX: the original assigned the entire search response here and
        # then iterated its .values(), which yields the response envelope
        # (e.g. the 'data' dict) instead of tile records.
        tiles = all_tiles

    for tile in tiles:
        # Only continue if there is less than set cloud cover on the image
        if float(tile['cloudCover']) < maxcloudcover:
            print(tile['entityId'])
            # Get tile download info
            gettile = api.download(dataset=dataset,
                                   node='EE',
                                   entityids=tile['entityId'],
                                   product='STANDARD',
                                   api_key=apikey)
            tileurl = gettile['data'][0]['url']

            # Derive the local file name from the Content-Disposition header.
            print('Trying to access {}...'.format(tileurl))
            req = urlopen_with_retry(in_url=tileurl)
            cd = req.headers.get('content-disposition')
            fname = re.findall('(?<=filename=")[A-Za-z0-9_.]+', cd)[0]
            tiledirname = os.path.join(
                outdir,
                os.path.splitext(os.path.splitext(fname)[0])[0])
            if not os.path.exists(tiledirname):
                os.mkdir(tiledirname)
            outpath = os.path.join(tiledirname, fname)

            # Inspired from https://github.com/yannforget/landsatxplore
            if not os.path.exists(outpath):
                with open(outpath, 'wb') as f:
                    f.write(req.read())
                print('Downloaded {}, now unzipping...'.format(outpath))

                # Decompress the .gz wrapper, then extract the inner tarball.
                # (Renamed the original loop variable `input`, which shadowed
                # the builtin.)
                tarf = os.path.splitext(outpath)[0]
                with gzip.GzipFile(outpath, 'rb') as gz:
                    with open(tarf, 'wb') as output:
                        output.write(gz.read())

                with tarfile.open(tarf) as ftar:
                    ftar.extractall(
                        tiledirname)  # specify which folder to extract to
            else:
                print('{} already exists...'.format(outpath))