Example #1
def download_by_id(item_id):
    PATH_RESULT = os.path.join(DOWNLOAD_LOCATION, item_id)
    if not os.path.exists(PATH_RESULT):
        os.makedirs(PATH_RESULT)
    # activate assets
    print("Downloading product id {}".format(item_id))
    assets = client.get_assets_by_id(ITEM_TYPE, item_id).get()

    for product in PRODUCTS:
        try:
            client.activate(assets["{}".format(product)])
        except Exception:
            print("The product {} isn't available for id: {}".format(product, item_id))
            continue

    # wait until activation completes
    while True:
        assets = client.get_assets_by_id(ITEM_TYPE, item_id).get()
        if all(['location' in assets["{}".format(product)] for product in PRODUCTS]):
            break
        print("Waiting for the order to be available...")
        time.sleep(10)
        print("Trying again...")

    # Download product.
    for product in PRODUCTS:
        cd = client.download(assets[product],
                             callback=api.write_to_file(PATH_RESULT))
        file_name = cd.get_body().name
        print("{}: {}".format(product, file_name))
        with open(LOG_IMAGES, "a") as log_file:
            log_file.write(file_name + "\n")
    print("Finished, everything is OK!")
Example #2
def execute(args):
    '''
    This script executes the planet API to download images.

    Current capabilities only allow a single geographic region of interest
    to be observed; however, multiple different asset types are allowed.

    Date filters are all checked using regex, as is the cloud cover percentage.

    Currently I utilize command line arguments to give the user the ability to
    control item types (satellite source) and asset types (visual, etc.)

    TODOS:

    1. How to handle requests that are invalid
            -especially when the asset type is not available
    2. How to handle long activation times
    3. Finalize input structure for arguments
    '''

    client = api.ClientV1(api_key=API_KEY)

    start_date = ArgsInputs.get_start_date(args.start_year, args.start_month,
                                           args.start_day)

    percent_cloud = ArgsInputs.get_cloud_percentage(args.cloud_percent)

    regional_filters = Filters.create_regional_filter(args.geo_json_path,
                                                      start_date,
                                                      percent_cloud)

    req = Requests.create_search_request(regional_filters, args.item_types)

    response = client.quick_search(req)

    path = os.path.join(args.output_dir, 'generated_results.json')

    print("Generated Results JSON")
    with open(path, 'w') as f:
        response.json_encode(f, indent=True)

    print("Begin Item Breakdown")
    callback = api.write_to_file(directory=args.output_dir)
    for item in response.items_iter(5):
        print(f"Item ID: {item['id']}")
        assets = client.get_assets(item).get()

        for asset_type in args.asset_type:
            asset = assets.get(asset_type)
            activation_status = asset.get('status')
            while activation_status == 'inactive':
                print("Activating.....")
                client.activate(asset)
                time.sleep(25)
                # refresh the asset; the dict returned by get_assets() is a snapshot
                assets = client.get_assets(item).get()
                asset = assets.get(asset_type)
                activation_status = asset.get('status')
            body = client.download(asset, callback=callback)
            body.wait()

        print("Item Downloaded")
        print("\n")
Example #3
def download_quads(mosaic_name, quad_ids, dest):
    """
    Download quad geotiffs
    """
    quad_ids = read(quad_ids, split=True)
    cl = client()
    futures = call_and_wrap(cl.fetch_mosaic_quad_geotiffs, mosaic_name,
                            quad_ids, api.write_to_file(dest))
    complete(futures, check_futures, cl)
Example #4
def download_quads(mosaic_name, quad_ids, dest):
    """
    Download quad geotiffs
    """
    quad_ids = read(quad_ids, split=True)
    cl = client()
    futures = call_and_wrap(cl.fetch_mosaic_quad_geotiffs, mosaic_name,
                            quad_ids, api.write_to_file(dest))
    complete(futures, check_futures, cl)
Example #5
def fetch_scene_thumbnails(scene_ids, scene_type, size, fmt, dest):
    '''Fetch scene thumbnail(s)'''

    if len(scene_ids) == 0:
        src = click.open_file('-')
        if not src.isatty():
            scene_ids = map(lambda s: s.strip(), src.readlines())

    futures = client().fetch_scene_thumbnails(scene_ids, scene_type, size, fmt,
                                              api.write_to_file(dest))
    check_futures(futures)
Example #6
def fetch_scene_thumbnails(scene_ids, scene_type, size, fmt, dest):
    '''Fetch scene thumbnail(s)'''

    if len(scene_ids) == 0:
        src = click.open_file('-')
        if not src.isatty():
            scene_ids = map(lambda s: s.strip(), src.readlines())

    futures = client().fetch_scene_thumbnails(scene_ids, scene_type, size, fmt,
                                              api.write_to_file(dest))
    check_futures(futures)
Example #7
def downloaditems(client, available_collects, dest):

    for item in available_collects:
        print(item['id'])
        assets = client.get_assets(item).get()
        activation = client.activate(assets['analytic'])
        # wait for activation
        assets = client.get_assets(item).get()
        callback = api.write_to_file(dest)
        body = client.download(assets['analytic'], callback=callback)
        body.await()  # planet client versions above 1.4.2 renamed this to wait()
Example #8
def fetch_scene_thumbnails(scene_ids, scene_type, size, fmt, dest):
    '''Fetch scene thumbnail(s)'''

    scene_ids = read(scene_ids or '@-', split=True)
    if not scene_ids:
        return

    cl = client()
    futures = cl.fetch_scene_thumbnails(scene_ids, scene_type, size, fmt,
                                        api.write_to_file(dest))
    complete(futures, check_futures, cl)
Example #9
def fetch_scene_thumbnails(scene_ids, scene_type, size, fmt, dest):
    '''Fetch scene thumbnail(s)'''

    scene_ids = read(scene_ids or '@-', split=True)
    if not scene_ids:
        return

    cl = client()
    futures = cl.fetch_scene_thumbnails(scene_ids, scene_type, size, fmt,
                                        api.write_to_file(dest))
    complete(futures, check_futures, cl)
Example #10
def active_and_downlaod_asset(item,asset_key,save_dir):

    assets = client.get_assets(item).get()

    asset = assets.get(asset_key)

    # activate
    activation = client.activate(asset)

    print(activation.response.status_code)

    asset_activated = False

    while asset_activated == False:
        # Get asset and its activation status
        assets = client.get_assets(item).get()  # need to used client to get the status
        asset = assets.get(asset_key)
        asset_status = asset["status"]

        # If asset is already active, we are done
        if asset_status == 'active':
            asset_activated = True
            print("Asset is active and ready to download")

        # Still activating. Wait and check again.
        else:
            print("...Still waiting for asset activation...")
            time.sleep(3)

    output_stream = sys.stdout
    def download_progress(start=None,wrote=None,total=None, finish=None): #result,skip=None
        # print(start,wrote,total,finish)
        # if total:
        #     # print('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.write('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.flush()
        # if total:
        #     if finish is None:
        #         print('received: %.2f K'%(float(total)/1024.0), end='\r')
        #     else:
        #         print('received: %.2f K' % (float(total) / 1024.0))
        pass
    callback = api.write_to_file(directory=save_dir + '/', callback=download_progress) # save_dir + '/'  #
    body = client.download(assets[asset_key], callback=callback)
    # if body._body.name == '':
    #     basic.outputlogMessage('Warning, the body name is missed, set as the asset key and id: item id: %s, asset %s'%(item['id'],asset_key))
    #     body._body.name = item['id']+'_'+asset_key  # AttributeError: can't set attribute
    body.await()

    return True
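The download_progress stub above leaves its reporting commented out; restoring it could look like the sketch below, which only reuses the signature and the commented-out formatting from the stub (treat it as illustrative, not as the library's required callback contract):

import sys

def download_progress(start=None, wrote=None, total=None, finish=None):
    # Overwrite one console line with the cumulative bytes received,
    # then print a final newline-terminated total once the download finishes.
    if total is not None:
        if finish is None:
            sys.stdout.write('received: %.2f K\r' % (float(total) / 1024.0))
            sys.stdout.flush()
        else:
            print('received: %.2f K' % (float(total) / 1024.0))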
Example #11
def download_a_asset_from_server(item, assets, asset_key, save_dir):
    '''
    download an asset from the server
    :param item: the item
    :param assets: assets from get_assets_from_server
    :param asset_key: the name of the asset
    :param save_dir: directory to save the asset to
    :return: True if successful, False otherwise
    '''

    proc_id = multiprocessing.current_process().pid
    print('Process: %d, start downloading %s (id: %s)' %
          (proc_id, asset_key, item['id']))
    output_stream = sys.stdout

    def download_progress(start=None,
                          wrote=None,
                          total=None,
                          finish=None):  #result,skip=None
        # print(start,wrote,total,finish)
        # if total:
        #     # print('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.write('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.flush()
        # if total:
        #     if finish is None:
        #         print('received: %.2f K'%(float(total)/1024.0), end='\r')
        #     else:
        #         print('received: %.2f K' % (float(total) / 1024.0))
        pass

    callback = api.write_to_file(
        directory=save_dir + '/',
        callback=download_progress)  # save_dir + '/'  #
    body = client.download(assets[asset_key], callback=callback)
    # body.await() for version 1.1.0
    try:
        body.wait()  # for version > 1.4.2
    except APIException as e:
        output_planetAPI_error(
            'An APIException occurs when try to download %s (id: %s)' %
            (asset_key, item['id']))
        output_planetAPI_error(str(e))
        raise Exception("rate limit error or other API errors")
        # return False  # return a large number
    except Exception as e:
        raise APIException(str(e))

    return True
Example #12
def fetch_lat_lon(image_dir, lat_lon_list, count = 1, scene_type = 'rapideye', product = 'analytic'):
    """
    Download a single rapideye image of each coordinate in a list of coordinates.

    :param image_dir: The directory where the images should be downloaded to.
    :param count: The maximum number of scenes to download per coordinate.
    :param lat_lon_list: A list of coordinate tuples in the form of (lat, lon)
    :param scene_type: The type of satellite that the images should be from ('ortho' or 'rapideye')
    :param product: The image type. 'analytic' or 'visual'.
    :return:
    """
    points = [geojson.Point([lon, lat]) for lat, lon in lat_lon_list]
    intersects = [geojson.dumps(point) for point in points]
    sceneIDs = []

    url = "https://api.planet.com/v0/scenes/rapideye/"

    for intersect in intersects:

        params = {
            "cloud_cover.estimated.lte": 0,
            "intersects":intersect,
        }

        data = requests.get(url, params=params, auth=(const.API_KEY, ''))
        scenes_data = data.json()["features"]

        if not scenes_data:
            print "No scenes available for these coordinates: ", intersect
            continue

        counter = 0
        for scene in scenes_data:
            if counter == count: break
            sceneIDs.append(scene['id'])
            print scene['id']
            counter += 1

    print "Downloading scene list!"
    planet_client = api.Client(api_key = const.API_KEY)

    try:
        callback = api.write_to_file(image_dir)
        bodies = planet_client.fetch_scene_geotiffs(scene_ids = sceneIDs, product = product, scene_type=scene_type, callback=callback)

        for b in bodies:
            b.await()
    except Exception, e:
        print "Download failed: %s"%e
Example #13
def download(scenes_file, product, path):
    with open(scenes_file) as f:
        scenes = json.load(f)

    assets = [
        planet.get_assets(scene).get()[product] for scene in scenes["features"]
    ]

    for asset in assets:
        click.echo(asset)
        planet.download(asset, api.write_to_file(path)).await()

    files = [pth for pth in os.listdir(path) if pth.endswith(".tif")]
    try:
        os.mkdir(os.path.join(path, "processed"))
    except FileExistsError:
        pass

    for pth in tqdm(files):
        dataset = rasterio.open(os.path.join(path, pth))
        mask = dataset.read(4)
        data = dataset.read((1, 2, 3)).astype(np.int16)
        for i in range(data.shape[0]):
            data[i] = data[i] - 9999 * (~mask > 0)

        new_dataset = rasterio.open(os.path.join(path, "processed", pth),
                                    'w',
                                    driver='GTiff',
                                    height=dataset.shape[0],
                                    width=dataset.shape[1],
                                    count=3,
                                    dtype=str(data.dtype),
                                    crs=dataset.crs,
                                    transform=dataset.affine,
                                    nodata=-9999)
        new_dataset.write(data, [1, 2, 3])
        dataset.close()
        new_dataset.close()
Example #14
def activate_and_download_asset_type(asset_type):
    activated = False
    while not activated:
        dataset = \
            session.get(
                ("https://api.planet.com/data/v1/item-types/" +
                "{}/items/{}/assets/").format(item['properties']['item_type'], item['id']))
        # extract the activation url from the item for the desired asset
        item_activation_url = dataset.json(
        )[asset_type]["_links"]["activate"]
        # request activation
        response = session.post(item_activation_url)
        activated = (response.status_code == 204)
        if not activated:
            print("Waiting for activation of: ", item['id'])
            import time
            time.sleep(30.0)
    asset = client.get_assets(item).get()[asset_type]
    callback = api.write_to_file(directory=output_directory,
                                 callback=None,
                                 overwrite=True)
    name = client.download(asset, callback=callback).wait().name
    return name, item
Example #15
def download_image():

    client = api.ClientV1(api_key=API_KEY)

    # defining the Area of Interest
    aoi = {
        "type": "Polygon",
        "coordinates": GEOMETRY,
    }

    # build a filter for the AOI
    query = api.filters.and_filter(
        api.filters.geom_filter(aoi),
        api.filters.range_filter('cloud_cover', gt=0),
        api.filters.range_filter('cloud_cover', lt=CLOUD_COVER))

    # we are requesting PlanetScope 3 Band imagery
    item_types = ['PSScene3Band']
    request = api.filters.build_search_request(query, item_types)

    # this will cause an exception if there are any API related errors
    results = client.quick_search(request)

    # creates the output directory if not already exists
    if not os.path.exists(SAVE_DIR):
        os.mkdir(SAVE_DIR)

    # items_iter returns an iterator over API response pages
    for item in results.items_iter(NUM_IMG):
        # each item is a GeoJSON feature
        print('downloading tile {}'.format(item['id']))
        assets = client.get_assets(item).get()
        activation = client.activate(assets['visual'])

        callback = api.write_to_file(directory=SAVE_DIR)
        body = client.download(assets['visual'], callback=callback)
        body.await()
Example #16
#Activate requests (tell Planet to prepare the images for download)
assets_list = []
for ids, item in zip(scene_id, item_type):
    assets_list.append(client.get_assets_by_id(id=ids, item_type=item).get())

#Activate the assets and check the status (204 is the success code ;])
activation_list = [
    client.activate(x['analytic']) for x in assets_list
    if 'analytic' in x
]
for e, i in enumerate(activation_list):
    print(i.response.status_code)

#Download images! (mnt/data)
callback = api.write_to_file(directory="/mnt/data/shared/planet/imgs_ts")
body = []
for i in assets_list:
    if "analytic" in i.keys():
        body.append(client.download(i['analytic'], callback=callback))

# Image processing

#Load requests
planet_torres_strait = []
with open('/mnt/data/shared/planet/results_planet_torres_strait.txt',
          'r') as json_file:
    for line in json_file:
        planet_torres_strait.append(eval(line))

#Get images that were downloaded
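Parsing the saved results with eval only works if each line is a Python literal, and it executes whatever the file contains; if the file actually holds one JSON object per line, json.loads is the safer parser. A sketch under that assumption (results_path and planet_results are illustrative names):

import json

planet_results = []
with open(results_path) as fh:
    for line in fh:
        line = line.strip()
        if line:  # skip blank lines
            planet_results.append(json.loads(line))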
Example #17
					WAIT = WAIT + 10				
					assets = client.get_assets(item).get()
				if WAIT > 30:
					WAIT = WAIT -10

				# wait for activation
				#assets = client.get_assets(item).get()
				if DEBUG:
					print("\n{}\n".format(sceneType[0]))
					pprint(assets[sceneType[0]])
					print("\n{}\n".format(sceneType[1]))
					pprint(assets[sceneType[1]])
					pprint(activation[0])
					pprint(activation[1])

				callback = api.write_to_file(directory=targetDir)


				#body = client.download(assets[sceneType[0]], callback=callback)
				bodyxml = client.download(assets[sceneType[1]], callback=callback)

				#body.await()
				bodyxml.await()
				#def warpToFile(targetDir, identifier, filename, poly, asset,targetSRID):
				warpToFile(targetDir, row['identifier'], "{}_{}".format(item['id'],sceneType[0]), poly, assets[sceneType[0]], TARGETSRID)


		else:
			
			print("\t {1} !! {0} already exists! Skipping!{2}".format(targetDir, RED, NORMAL))
Example #18
      [-122.54, 37.81]
    ]
  ]
}"""

# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
client = api.Client()

# collect all scenes here
scenes = []

print 'loading scenes'

# get `count` number of scenes, for this example, use 1 to verify paging
scene = client.get_scenes_list(count=1)
# we'll use 3 `pages` of results
for s in scene.iter(pages=3):
    scenes.extend(s.get()['features'])

assert len(scenes) == 3

ids = [f['id'] for f in scenes]
print 'fetching tiffs for'
print '\n'.join(ids)
results = client.fetch_scene_thumbnails(ids,
                                        callback=api.write_to_file(dest_dir))

# results are async objects and we have to ensure they all process
map(lambda r: r.await(), results)
Example #19
def sync(destination, scene_type, limit):
    '''Synchronize a directory to a specified AOI'''
    if not path.exists(destination) or not path.isdir(destination):
        raise click.ClickException('destination must exist and be a directory')
    aoi_file = path.join(destination, 'aoi.geojson')
    if not path.exists(aoi_file):
        raise click.ClickException(
            'provide an aoi.geojson file in "%s"' % destination
        )
    aoi = None
    with open(aoi_file) as fp:
        aoi = fp.read()
    sync_file = path.join(destination, 'sync.json')
    if path.exists(sync_file):
        with open(sync_file) as fp:
            sync = json.loads(fp.read())
    else:
        sync = {}
    filters = {}
    if 'latest' in sync:
        filters['acquired.gt'] = sync['latest']
    start_time = time.time()
    transferred = 0
    _client = client()
    res = call_and_wrap(_client.get_scenes_list, scene_type=scene_type,
                        intersects=aoi, count=100, order_by='acquired asc',
                        **filters)
    click.echo('total scenes to fetch: %s' % res.get()['count'])
    if limit > 0:
        click.echo('limiting to %s' % limit)
    counter = type('counter', (object,),
                   {'remaining': res.get()['count'] if limit < 1 else limit})()
    latest = None

    def progress_callback(arg):
        if not isinstance(arg, int):
            counter.remaining -= 1
            click.echo('downloaded %s, remaining %s' %
                       (arg.name, counter.remaining))
    write_callback = api.write_to_file(destination, progress_callback)
    for page in res.iter():
        features = page.get()['features'][:counter.remaining]
        if not features:
            break
        ids = [f['id'] for f in features]
        futures = _client.fetch_scene_geotiffs(
            ids, scene_type, callback=write_callback
        )
        for f in features:
            metadata = path.join(destination, '%s_metadata.json' % f['id'])
            with open(metadata, 'wb') as fp:
                fp.write(json.dumps(f, indent=2))
        check_futures(futures)
        transferred += total_bytes(futures)
        recent = max([
            api.strp_timestamp(f['properties']['acquired']) for f in features]
        )
        latest = max(latest, recent) if latest else recent
        if counter.remaining <= 0:
            break
    if latest:
        sync['latest'] = api.strf_timestamp(latest)
        with open(sync_file, 'wb') as fp:
            fp.write(json.dumps(sync, indent=2))
    if transferred:
        summarize_throughput(transferred, start_time)
Example #20
def sync(destination, scene_type, limit):
    '''Synchronize a directory to a specified AOI'''
    if not path.exists(destination) or not path.isdir(destination):
        raise click.ClickException('destination must exist and be a directory')
    aoi_file = path.join(destination, 'aoi.geojson')
    if not path.exists(aoi_file):
        raise click.ClickException('provide an aoi.geojson file in "%s"' %
                                   destination)
    aoi = None
    with open(aoi_file) as fp:
        aoi = fp.read()
    sync_file = path.join(destination, 'sync.json')
    if path.exists(sync_file):
        with open(sync_file) as fp:
            sync = json.loads(fp.read())
    else:
        sync = {}
    filters = {}
    if 'latest' in sync:
        filters['acquired.gt'] = sync['latest']
    start_time = time.time()
    transferred = 0
    _client = client()
    res = call_and_wrap(_client.get_scenes_list,
                        scene_type=scene_type,
                        intersects=aoi,
                        count=100,
                        order_by='acquired asc',
                        **filters)
    click.echo('total scenes to fetch: %s' % res.get()['count'])
    if limit > 0:
        click.echo('limiting to %s' % limit)
    counter = type('counter', (object, ),
                   {'remaining': res.get()['count'] if limit < 1 else limit})()
    latest = None

    def progress_callback(arg):
        if not isinstance(arg, int):
            counter.remaining -= 1
            click.echo('downloaded %s, remaining %s' %
                       (arg.name, counter.remaining))

    write_callback = api.write_to_file(destination, progress_callback)
    for page in res.iter():
        features = page.get()['features'][:counter.remaining]
        if not features:
            break
        ids = [f['id'] for f in features]
        futures = _client.fetch_scene_geotiffs(ids,
                                               scene_type,
                                               callback=write_callback)
        for f in features:
            metadata = path.join(destination, '%s_metadata.json' % f['id'])
            with open(metadata, 'wb') as fp:
                fp.write(json.dumps(f, indent=2))
        check_futures(futures)
        transferred += total_bytes(futures)
        recent = max([
            api.strp_timestamp(f['properties']['acquired']) for f in features
        ])
        latest = max(latest, recent) if latest else recent
        if counter.remaining <= 0:
            break
    if latest:
        sync['latest'] = api.strf_timestamp(latest)
        with open(sync_file, 'wb') as fp:
            fp.write(json.dumps(sync, indent=2))
    if transferred:
        summarize_throughput(transferred, start_time)
Example #21
def fetch_images(image_directory, scene_type = 'rapideye', product = 'analytic', filters = {}):
    """

    :param image_directory: The directory where the original images should be saved to.
    :param scene_type: The satellite type that images should be fetched from.
    :param product: The type of imagery that will be downloaded. "analytic" is not pre-processed, "visual" is true-color.
    :param filters: Any filters for the search.
    :return:
    """

    if not os.path.isdir(image_directory):
        raise OSError("The specified path " + image_directory + " does not point to a directory!")

    if not filters:
        start = datetime.datetime(year=2011, month=1, day=1, tzinfo=pytz.utc).isoformat()
        end = datetime.datetime(year=2015, month=12, day=1, tzinfo=pytz.utc).isoformat()

        filters = {
            # Your filters here, for example:
            # Get images with estimated 0% cloud cover
            "cloud_cover.estimated.lte": 0,
            "acquired.gte": start,
            "acquired.lte": end
        }

    next_url = "https://api.planet.com/v0/scenes/" + scene_type + "/?" + urllib.urlencode(filters)

    scene_data_pages = []
    scene_IDs = []
    scene_count = 0

    print "Searching for %s images that comply with the given filters." % scene_type
    print "Scene List:"
    while next_url:
        # Note: you don't have to pass the filters in again here,
        # here, they will always be included in data.links.next
        r = requests.get(next_url, auth=(const.API_KEY, ''))
        r.raise_for_status()
        data = r.json()
        scenes_data = data["features"]
        scene_data_pages.append(scenes_data)

        for scene in scenes_data:
            scene_IDs.append(str(scene['id']))
            print str(scene['id'])

        scene_count += len(scenes_data)

        next_url = data["links"].get("next", None)

    print '\n%s total results' % scene_count
    print "Downloading scene list!"

    planet_client = api.Client(api_key = const.API_KEY)

    callback = api.write_to_file(image_directory)
    bodies = planet_client.fetch_scene_geotiffs(scene_IDs, scene_type = scene_type, product = product,  callback=callback)
    # await the completion of the asynchronous downloads, this is where
    # any exception handling should be performed
    for b in bodies:
        b.await()
Example #22
####

for i in item_type:
    request = api.filters.build_search_request(PB_filter, [i],
                                               name=None,
                                               interval='day')
    results = client.quick_search(request)
    print i
    for item in results.items_iter(limit):
        print item['id']
        dataset = \
            session.get(
                ("https://api.planet.com/data/v1/item-types/" +
                "{}/items/{}/assets/").format(i, item['id']))
        # extract the activation url from the item for the desired asset
        item_activation_url = dataset.json()[asset_type]["_links"]["activate"]
        # request activation
        response = session.post(item_activation_url)
        print response.status_code
        while response.status_code != 204:
            time.sleep(30)
            response = session.post(item_activation_url)
            print response.status_code
        assets = client.get_assets(item).get()
        callback = api.write_to_file(directory=ddir,
                                     callback=None,
                                     overwrite=True)
        body = client.download(assets[asset_type], callback=callback)
        body.await()
Example #23
#

#collect all scenes here
scenes = []

print 'loading scenes'

# get `count` number of scenes, for this example, use 1 to verify paging
filters = {'cloud_cover.estimated.lt': 0.1}
scene = client.get_scenes_list(scene_type= 'ortho', intersects=aoi, **filters)

# we'll use 3 `pages` of results
#for s in scene.iter(pages=1):
#	j = scenes.extend(s.get()['features'])

ids = [f['id'] for f in scenes]

print 'fetching tiffs for'
print '\n'.join(ids)
results = client.fetch_scene_thumbnails(ids, callback=api.write_to_file(outdir))

# results are async objects and we have to ensure they all process
#map(lambda r: r.await(), results)

#callback = api.write_to_file(directory='/home/jasondavis/planet_api/output') # change this
#callbacks = client.fetch_scene_geotiffs(ids, callback=callback)
#for cb in callbacks:
 #   body = cb.await()
 #   print 'downloaded', body.name, 'size=%s bytes' % len(body)
Example #24
def activate_and_download_asset(item, asset_key, save_dir):
    '''
    activate an asset of an item and download it
    :param item: the item
    :param asset_key: the name of the asset
    :param save_dir: directory to save the asset to
    :return: True if successful, False otherwise
    '''

    assets = client.get_assets(item).get()

    asset = assets.get(asset_key)

    # activate
    activation = client.activate(asset)

    print(activation.response.status_code)
    if int(activation.response.status_code) == 401:
        basic.outputlogMessage(
            'The account does not have permissions to download this file')
        return False

    asset_activated = False

    while asset_activated == False:
        # Get asset and its activation status
        assets = client.get_assets(
            item).get()  # need to used client to get the status
        asset = assets.get(asset_key)
        asset_status = asset["status"]

        # If asset is already active, we are done
        if asset_status == 'active':
            asset_activated = True
            print("Asset is active and ready to download")

        # Still activating. Wait and check again.
        else:
            print("...Still waiting for asset activation...")
            time.sleep(3)

    output_stream = sys.stdout

    def download_progress(start=None,
                          wrote=None,
                          total=None,
                          finish=None):  #result,skip=None
        # print(start,wrote,total,finish)
        # if total:
        #     # print('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.write('received: %.2f K'%(float(total)/1024.0))
        #     output_stream.flush()
        # if total:
        #     if finish is None:
        #         print('received: %.2f K'%(float(total)/1024.0), end='\r')
        #     else:
        #         print('received: %.2f K' % (float(total) / 1024.0))
        pass

    callback = api.write_to_file(
        directory=save_dir + '/',
        callback=download_progress)  # save_dir + '/'  #
    body = client.download(assets[asset_key], callback=callback)
    # body.await() for version 1.1.0
    body.wait()  # for version > 1.4.2

    return True
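Several of the examples above note that planet 1.1.0 exposed body.await() while versions above 1.4.2 renamed it to body.wait(); a small compatibility helper, assuming only that one of the two methods exists (wait_for_download is an illustrative name):

def wait_for_download(body):
    # Newer clients expose .wait(); in old clients the method was literally
    # named "await", which is now a reserved word, so reach it via getattr.
    if hasattr(body, 'wait'):
        return body.wait()
    return getattr(body, 'await')()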