Example No. 1
def main(jsonfile, startdate, enddate, outputdir):
    PLANET_API_KEY = os.getenv('PL_API_KEY')

    client = api.ClientV1()

    with open(jsonfile, 'r') as f:
        data = json.load(f)

    aoi = data['features'][0]['geometry']

    query = api.filters.and_filter(api.filters.geom_filter(aoi), \
      api.filters.date_range('acquired', gt=startdate, lt=enddate))
    ## api.filters.range_filter('cloud_cover', gte=0.1), \

    item_types4 = ['PSScene4Band']
    request4 = api.filters.build_search_request(query, item_types4)
    item_types3 = ['PSScene3Band']
    request3 = api.filters.build_search_request(query, item_types3)

    results3 = client.quick_search(request3)
    results4 = client.quick_search(request4)

    myreps3 = []
    myreps4 = []
    list3 = []
    list4 = []

    for item in results4.items_iter(limit=100):
        list4.append(item)
        myreps4.append(item['id'])
        if (item['properties']['instrument'] == 'PS2.SD'):
            print(r'%s : %s' % (item['id'], 'Super Dove'))
        else:
            print(r'%s : %s' % (item['id'], 'Dove'))

    for item in results3.items_iter(limit=100):
        ## print(r'%s' % item['id'])
        myreps3.append(item['id'])

    if (len(myreps3) > len(myreps4)):
        diff34 = np.setdiff1d(myreps3, myreps4).tolist()
        print("\nPossible 3Band data that could be made to 4Band:")
        [print("%s" % thisid) for thisid in diff34]

    print("\n")

    mydownloader = downloader.create(client)

    print((r'Starting Download of %d images.') % len(myreps4))
    mydownloader.download(results4.items_iter(limit=100), ['udm2'], outputdir)
    print(('Finished with Download of udm2.'))
    mydownloader.download(results4.items_iter(limit=100), ['analytic_sr'],
                          outputdir)
    print(('Finished with Download of analytic_sr.'))
    mydownloader.download(results4.items_iter(limit=100), ['analytic_xml'],
                          outputdir)
    print(('Finished with Download of analytic_xml.'))
    mydownloader.shutdown()
    print(('Downloader has been shut down.'))
    return (0)
Example No. 2
def do_search(pl_filter, arguments):

    # Get client

    client = planetapi.ClientV1()
    # Build request; this will cause an exception if there are any API related errors
    request = planetapi.filters.build_search_request(pl_filter,
                                                     arguments.satellite)

    results = client.quick_search(request)

    if arguments.doprint:
        print("Results from:", ", ".join(arguments.satellite))

    out_buffer = []
    # items_iter returns an iterator over API response pages
    for item in results.items_iter(limit=None):
        # each item is a GeoJSON feature
        if arguments.doprint is True:
            stdout.write('%s\n' % item['id'])
            out_buffer.append(item)

        else:
            out_buffer.append(item)

    with open('./result.json', 'w') as request_out:
        json.dump(out_buffer,
                  request_out,
                  sort_keys=True,
                  indent=4,
                  ensure_ascii=False)
Example No. 3
def validate_key(key, out):
    """Validate the API key and save it the key variable"""

    out.add_msg(cm.planet.test_key)

    # get all the subscriptions
    resp = requests.get(planet.url, auth=(key, ''))
    subs = resp.json()

    # only continue if the resp was 200
    if resp.status_code != 200:
        raise Exception(subs['message'])

    # check the subscription validity
    # stop the execution if it's not the case
    planet.valid = any(sub['state'] == 'active' for sub in subs)
    check_key()

    # authenticate to Planet
    planet.client = api.ClientV1(api_key=key)

    planet.key = key

    out.add_msg(cm.planet.valid_key, 'success')

    return
Example No. 4
    def __init__(self, api_key, config):
        imagery_config = config['imagery']
        self.api_key = api_key
        self.max_clouds_initial = float(imagery_config['max_clouds_initial'])
        self.max_clouds = float(imagery_config['max_clouds']
                                )  # max proportion of pixels that are clouds
        self.max_bad_pixels = float(
            imagery_config['max_bad_pixels']
        )  # max proportion of bad pixels (transmission errors, etc.)
        self.max_nodata = float(
            imagery_config['max_nodata'])  # max nodata values per cellgrid
        self.maximgs = int(imagery_config['maximgs'])  # 15 #10 #20
        self.output_encoding = imagery_config['output_encoding']
        # self.output_filename = imagery_config['output_filename']
        # self.output_filename_csv = imagery_config['output_filename_csv']
        self.catalog_path = imagery_config['catalog_path']
        # self.s3_catalog_bucket = imagery_config['s3_catalog_bucket']
        # self.s3_catalog_prefix = imagery_config['s3_catalog_prefix']
        self.products = {
            'analytic_sr': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic_sr',
                'ext': 'tif'
            },
            'analytic': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic',
                'ext': 'tif'
            },
            'analytic_xml': {
                'item_type': 'PSScene4Band',
                'asset_type': 'analytic_xml',
                'ext': 'xml'
            },
            'visual': {
                'item_type': 'PSScene3Band',
                'asset_type': 'visual',
                'ext': 'tif'
            }
        }
        self.client = api.ClientV1(api_key=self.api_key)
        self.s3client = boto3.client('s3')
        # self.with_analytic = json.loads(imagery_config['with_analytic'].lower())
        # self.with_analytic_xml = json.loads(imagery_config['with_analytic_xml'].lower())
        # self.with_visual = json.loads(imagery_config['with_visual'].lower())
        # self.with_immediate_cleanup = json.loads(imagery_config['with_immediate_cleanup'].lower())
        # self.local_mode = json.loads(imagery_config['local_mode'].lower())
        # self.s3_only = json.loads(imagery_config['s3_only'].lower())
        # self.transfer = S3Transfer(self.s3client, TransferConfig(use_threads = False))
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        # planet has limitation 5 sec per key (search queries)
        threads_number = imagery_config['threads']
        if threads_number == 'default':
            threads_number = multiprocessing.cpu_count() * 2 + 1
        else:
            threads_number = int(threads_number)

        self.secondary_uploads_executor = FixedThreadPoolExecutor(
            size=threads_number)
Example No. 5
def execute(args):
    '''
    This script calls the Planet API to download images.

    Current capabilities only allow a single geographic region of interest
    to be observed; however, multiple different asset types are allowed.

    Date filters and the cloud cover percentage are validated with regex.

    Command line arguments currently give the user control over
    item types (satellite source) and asset types (visual, etc.).

    TODOS:

    1. How to handle invalid requests
            - especially when the asset type is not available
    2. How to handle long activation times
    3. Finalize input structure for arguments
    '''

    client = api.ClientV1(api_key=API_KEY)

    start_date = ArgsInputs.get_start_date(args.start_year, args.start_month,
                                           args.start_day)

    percent_cloud = ArgsInputs.get_cloud_percentage(args.cloud_percent)

    regional_filters = Filters.create_regional_filter(args.geo_json_path,
                                                      start_date,
                                                      percent_cloud)

    req = Requests.create_search_request(regional_filters, args.item_types)

    response = client.quick_search(req)

    path = os.path.join(args.output_dir, 'generated_results.json')

    print("Generated Results JSON")
    with open(path, 'w') as f:
        response.json_encode(f, indent=True)

    print("Begin Item Breakdown")
    callback = api.write_to_file(directory=args.output_dir)
    for item in response.items_iter(5):
        print(f"Item ID: {item['id']}")
        assets = client.get_assets(item).get()

        for asset_type in args.asset_type:
            asset = assets.get(asset_type)
            activation_status = asset.get('status')
            while activation_status == 'inactive':
                print("Activating.....")
                client.activate(asset)
                time.sleep(25)
                # re-fetch the asset so the activation status actually updates
                asset = client.get_assets(item).get().get(asset_type)
                activation_status = asset.get('status')
            body = client.download(asset, callback=callback)
            body.wait()

        print("Item Downloaded")
        print("\n")
def test_missing_api_key():
    '''verify exception raised on missing API key'''
    client = api.ClientV1(api_key=None)
    try:
        client._get('whatevs').get_body()
    except api.exceptions.InvalidAPIKey as ex:
        assert str(ex) == 'No API key provided'
    else:
        assert False
Example No. 7
def process_upload(upload_id):
    """Create Raster Foundry scenes and attach to relevant projects

    Args:
        upload_id (str): ID of Raster Foundry upload to process
    """
    click.echo("Processing upload")

    logger.info('Getting upload')
    upload = Upload.from_id(upload_id)
    logger.info('Updating upload status')
    upload.update_upload_status('Processing')

    logger.info('Processing upload (%s) for user %s with files %s',
                upload.id, upload.owner, upload.files)

    try:
        if upload.uploadType.lower() in ['local', 's3']:
            logger.info('Processing a geotiff upload')
            factory = GeoTiffS3SceneFactory(upload)
        elif upload.uploadType.lower() == 'planet':
            logger.info('Processing a planet upload. This might take a while...')
            factory = PlanetSceneFactory(
                upload.files,
                upload.datasource,
                upload.organizationId,
                upload.id,
                upload.projectId,
                upload.visibility,
                [],
                upload.owner,
                api.ClientV1(upload.metadata.get('planetKey'))
            )
        else:
            raise Exception('upload type ({}) didn\'t make any sense'.format(upload.uploadType))
        scenes = factory.generate_scenes()
        logger.info('Creating scene objects for upload %s, preparing to POST to API', upload.id)

        created_scenes = [scene.create() for scene in scenes]
        logger.info('Successfully created %s scenes (%s)', len(created_scenes), [s.id for s in created_scenes])

        if upload.projectId:
            logger.info('Upload specified a project. Linking scenes to project %s', upload.projectId)
            scene_ids = [scene.id for scene in created_scenes]
            batch_scene_to_project_url = '{HOST}/api/projects/{PROJECT}/scenes'.format(HOST=HOST, PROJECT=upload.projectId)
            session = get_session()
            response = session.post(batch_scene_to_project_url, json=scene_ids)
            response.raise_for_status()
        upload.update_upload_status('Complete')
        logger.info('Finished importing scenes for upload (%s) for user %s with files %s',
                    upload.id, upload.owner, upload.files)
    except:
        logger.error('Failed to process upload (%s) for user %s with files %s',
                     upload.id, upload.owner, upload.files)
        upload.update_upload_status('Failed')
        raise
Example No. 8
def analytics_client_v1():
    # Non-default analytics base URL doesn't have the analytics postfix
    params = dict(**client_params)
    if client_params.get('analytics_base_url') is not None:
        params['base_url'] = params.pop('analytics_base_url')
    else:
        params['base_url'] = 'https://api.planet.com/analytics/'

    client = api.ClientV1(**params)
    return client
Example No. 9
 def __init__(self,  aoi, 
              item_type=ItemType.PSScene4Band,
              product_id=None,
              title=None,
              description=None,
              overwrite=False,
              start_datetime=None,
              end_datetime=None,
              cloud_fraction=1,
              limit=None,
              order_id=None,
              api_key=None):
     
     self._planet = p.ClientV1(api_key=api_key)
     self._catalog = dl.Catalog()
     self._metadata = dl.Metadata()
     self._auth = dl.Auth()
     self._order_id = order_id
     self._title = title
     self._description = description
     self.stats = None
     self._running = False
     self._items = []
     self.uploads = []
     
     
     if self._running:
         raise Exception('Already processing')
     else:
         self._running = True
         
     self._start_datetime = start_datetime
     self._end_datetime = end_datetime
     self._cloud_fraction = cloud_fraction
     self._limit = limit
    
     self._get_items(aoi, [item_type.name])
     self._init_product(product_id, item_type=item_type, overwrite=overwrite)
    
     item_ids = [item['id'] for item in self._items]
     
     scenes = clip_and_download(aoi, item_ids, item_type.name, item_type.bundle, api_key, order_id=self._order_id)
     for scene_id, scene in scenes.items():
         with open(scene['metadata.json']) as meta_file:
             metadata = json.load(meta_file)['properties']

         with open(scene['3B_AnalyticMS_metadata_clip.xml']) as xml_file:
             xml_meta = xmltodict.parse(xml_file.read())

         for band in xml_meta['ps:EarthObservation']['gml:resultOf']['ps:EarthObservationResult']['ps:bandSpecificMetadata']:
             metadata[f"band_{band['ps:bandNumber']}_radiometricScaleFactor"] = band['ps:radiometricScaleFactor']
             metadata[f"band_{band['ps:bandNumber']}_reflectanceCoefficient"] = band['ps:reflectanceCoefficient']

         self._upload_image([str(scene[str(file_key)]) for file_key in item_type.files], metadata, scene_id)
Example No. 10
    def __init__(self, geojson_directory=None, default_item_type=None):
        # if not specified, use constants
        if geojson_directory is None:
            geojson_directory = GEOJSON_DIRECTORY
        if default_item_type is None:
            default_item_type = DEFAULT_ITEM_TYPE

        self.item_type = default_item_type
        self.client = api.ClientV1()
        self.geojson_dir = geojson_directory

        self.search_results = {}
Example No. 11
 def __init__(self, client=None, **kwargs):
     if not client:
         client = api.ClientV1()
     super().__init__(client, **kwargs)
     if self.specs['landcover_indices']:
         self._tweak_landcover_specs()
     self._validate_asset_type()
     self._bandmap = {
         k: v.get(self.specs['asset_type'])
         for k, v in BANDMAP.items()
     }
     self._keymap = KEYMAP.copy()
     self._search_filters = self._build_search_filters()
Example No. 12
 def __init__(self, api_key):
     self.api_key = api_key
     self.max_clouds_initial = 0.25
     self.max_clouds = 0.01
     self.max_shadows = 0.01
     self.max_bad_pixels = 0.25
     self.max_nodata = 0.25
     self.maximgs = 1
     self.catalog_path = "catalog/"
     self.s3_catalog_bucket = "azavea-africa-test"
     self.s3_catalog_prefix = "planet/images"
     self.products = {
         'analytic_sr': {
             'item_type': 'PSScene4Band',
             'asset_type': 'analytic_sr',
             'ext': 'tif'
         },
         'analytic': {
             'item_type': 'PSScene4Band',
             'asset_type': 'analytic',
             'ext': 'tif'
         },
         'analytic_xml': {
             'item_type': 'PSScene4Band',
             'asset_type': 'analytic_xml',
             'ext': 'xml'
         },
         'visual': {
             'item_type': 'PSScene3Band',
             'asset_type': 'visual',
             'ext': 'tif'
         }
     }
     self.client = api.ClientV1(api_key=api_key)
     self.output_filename = "output.csv"
     self.output_encoding = "utf-8"
     self.s3client = boto3.client('s3')
     self.with_analytic = False
     self.with_analytic_xml = False
     self.with_visual = False
     self.local_mode = False
     self.s3_only = False
     self.transfer = S3Transfer(self.s3client,
                                TransferConfig(use_threads=False))
     self.transfer_config = TransferConfig(use_threads=False)
     self.logger = logging.getLogger(__name__)
     self.logger.setLevel(logging.INFO)
     self.secondary_uploads_executor = FixedThreadPoolExecutor(size=5)
     self.with_immediate_cleanup = False
Example No. 13
def get_and_set_Planet_key(user_account):
    keyfile = HOME+'/.planetkey'
    with open(keyfile) as f_obj:
        lines = f_obj.readlines()
        for line in lines:
            if user_account in line:
                key_str = line.split(':')[1]
                key_str = key_str.strip()       # remove '\n'
                os.environ["PL_API_KEY"] = key_str
                # set Planet API client
                global client
                client = api.ClientV1(api_key = key_str)

                return True
        raise ValueError('account %s cannot be found in %s' % (user_account, keyfile))
Example No. 14
def getPlanetPicture(fireDataSet):
    distance = 45
    for i in range(0, 100):

        client = api.ClientV1(api_key="e262ca6835e64fa7b6975c558237e509")

        geom = HF.getbox(i, distance, fireDataSet)

        geom_AOI = {"type": "Polygon", "coordinates": [geom]}

        pre_date = HF.getDatePre(fireDataSet, i)
        post_date = HF.getDatePost(fireDataSet, i)

        datePre_filter = filters.date_range('acquired',
                                            gte=pre_date[0],
                                            lte=pre_date[1])
        datePost_filter = filters.date_range('acquired',
                                             gte=post_date[0],
                                             lte=post_date[1])
        geom_filter = filters.geom_filter(geom_AOI)
        cloud_filter = filters.range_filter('cloud_cover', lte=0.03)

        andPre_filter = filters.and_filter(datePre_filter, cloud_filter,
                                           geom_filter)
        andPost_filter = filters.and_filter(datePost_filter, cloud_filter,
                                            geom_filter)

        item_types = ["PSOrthoTile"]
        reqPre = filters.build_search_request(andPre_filter, item_types)
        reqPost = filters.build_search_request(andPost_filter, item_types)

        resPre = client.quick_search(reqPre)
        resPost = client.quick_search(reqPost)

        print("it should print something :")

        for item in resPre.items_iter(1):
            print(item['id'], item['properties']['item_type'])

        for item in resPost.items_iter(1):
            print(item['id'], item['properties']['item_type'])

    imagePre = None
    imagePost = None
    return imagePre, imagePost
Example No. 15
def main(API_KEY, _request, AOI_geojson, Coverage=None):
    # Set up the client
    Client = api.ClientV1(API_KEY)

    # Request with the search filter
    res = Client.quick_search(_request)

    # Get all the results in a JSON object from the paged response
    File_Object = StringIO()
    res.json_encode(File_Object)
    res_json = json.loads(File_Object.getvalue())

    if Coverage is not None:
        # Loop through every geometry to calculate the coverage
        strip_images = list()
        geoms = AOI_geojson['features']
        for Planet_feature in res_json['features']:
            CoverageResults = list()

            # Loop through every geometry feature
            for geom in geoms:
                CoverageResults = Geo_tool.MeetCoverageRange(
                    json.dumps(geom), json.dumps(Planet_feature), Coverage[0],
                    Coverage[1])
            # If the image doesn't satisfy the coverage filter, add it to the strip list
            if not any(CoverageResults):
                strip_images.append(Planet_feature)

        # Strip images from the result
        if len(strip_images) > 0:
            for strip_image in strip_images:
                res_json['features'].remove(strip_image)

    # Report and quit the application if there is no result
    if len(res_json['features']) == 0:
        prompt_widget.InfoBox('No result',
                              'No selected items match the search criterion.')
        sys.exit(0)
    else:
        # Report the result number
        prompt_widget.InfoBox(
            'Search result',
            'Found {} matched items.'.format(len(res_json['features'])))

    return res_json
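Example No. 16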
def main(jsonfile, startdate, enddate, outputdir):
    client = api.ClientV1()

    with open(jsonfile, 'r') as f:
        data = json.load(f)

    aoi = data['features'][0]['geometry']
    ## aoi = {
    ##   "type": "Polygon",
    ##   "coordinates": [
    ##     [
    ##       [float(ullatlon[0]), float(ullatlon[1])],
    ##       [float(lrlatlon[0]), float(ullatlon[1])],
    ##       [float(lrlatlon[0]), float(lrlatlon[1])],
    ##       [float(ullatlon[0]), float(lrlatlon[1])],
    ##       [float(ullatlon[0]), float(ullatlon[1])],
    ##     ]
    ##   ]
    ## }

    query = api.filters.and_filter(api.filters.geom_filter(aoi), \
      api.filters.range_filter('cloud_cover', lt=0.1), \
      api.filters.date_range('acquired', gt=startdate, lt=enddate))

    item_types = ['PSScene4Band']
    request = api.filters.build_search_request(query, item_types)

    results = client.quick_search(request)

    myreps = []

    for item in results.items_iter(limit=100):
        ## sys.stdout.write(r'%s\n' % item['id'])
        print(r'%s' % item['id'])
        myreps.append(item)

    mydownloader = downloader.create(client)

    print((r'Starting Download of %d images.') % len(myreps))
    mydownloader.download(results.items_iter(len(myreps)), ['analytic_sr'],
                          outputdir)
    mydownloader.shutdown()
    print(('Finished with Download.'))
    return (0)
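Example No. 17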
def download_planet(ullatlon, lrlatlon, outputdir):
    client = api.ClientV1()

    ## done = download_planet(ullatlon, lrlatlon, outputdir)

    aoi = {
        "type":
        "Polygon",
        "coordinates": [[
            [ullatlon[0], ullatlon[1]],
            [lrlatlon[0], ullatlon[1]],
            [lrlatlon[0], lrlatlon[1]],
            [ullatlon[0], lrlatlon[1]],
            [ullatlon[0], ullatlon[1]],
        ]]
    }

    query = api.filters.and_filter(api.filters.geom_filter(aoi), \
      api.filters.range_filter('cloud_cover', lt=0.1), \
      api.filters.date_range('acquired', gt='2016-08-01', lt='2018-04-30'))

    ## item_types = ['PSScene4Band']
    item_types = ['SkySatScene']
    request = api.filters.build_search_request(query, item_types)

    results = client.quick_search(request)

    myreps = []

    for item in results.items_iter(limit=100):
        ## sys.stdout.write(r'%s\n' % item['id'])
        print(r'%s' % item['id'])
        myreps.append(item)

    mydownloader = downloader.create(client)

    print((r'Starting Download of %d images.') % len(myreps))
    mydownloader.download(results.items_iter(len(myreps)), ['analytic'],
                          outputdir)
    mydownloader.shutdown()
    print(('Finished with Download.'))
    return (0)
Example No. 18
def order_basemaps(key, out):
    """check the apy key and then order the basemap to update the select list"""
    
    # checking the key validity
    validate_key(key, out)
    
    out.add_msg(cm.planet.mosaic.load)
    
    # authenticate to Planet
    planet.client = api.ClientV1(api_key=planet.key)
    
    # get the basemap names
    # switch to get_mosaics() once Planet updates its API; until then, the mosaics are retrieved manually
    #mosaics = planet.client.get_mosaics().get()['mosaics']
    url = planet.client._url('basemaps/v1/mosaics')
    mosaics = planet.client._get(url, api.models.Mosaics, params={'_page_size': 1000}).get_body().get()['mosaics']

    out.add_msg(cm.planet.mosaic.complete, 'success')
    
    return [m['name'] for m in mosaics]
Example No. 19
    def _download(self):
        """Downloads the clipped images using the Planet Orders API."""
        client = api.ClientV1(api_key=PL_API_KEY)
        mysize = self.size / (111000. * math.cos(math.radians(self.latitude)))
        geom1 = get_geometry(self.longitude, self.latitude, mysize)

        item_ids = get_item_ids(client,
                                geom1,
                                start=self.start,
                                stop=self.stop,
                                limit=self.limit)
        #print("Clipping from {} items.".format(len(item_ids)))

        geom2 = get_geometry(self.longitude, self.latitude, 20 * mysize)
        self._clip_request_json = self._get_clip_request(item_ids, geom2)

        auth = HTTPBasicAuth(PL_API_KEY, '')
        clip_order_url = orders.place_order(self._clip_request_json, auth)
        orders.poll_for_success(clip_order_url, auth)
        downloaded_clip_files = orders.download_order(
            clip_order_url, auth, destination=self.download_dir)
        return downloaded_clip_files
Example No. 20
    def run(self):
        # Check authentication
        API_KEY = ValidateAccount()

        # Initialisation
        Client = api.ClientV1(API_KEY)

        # Check available items based on assets permissions
        Items_asset = dict()
        _permission_filter = api.filters.permission_filter('assets:download')

        for _index, Item in enumerate(self.Items):
            _request = api.filters.build_search_request(
                _permission_filter, [Item])
            try:
                _result = Client.quick_search(_request)
                _result_json = _result.get()

            except Exception as Other_Errors:
                prompt_widget.ErrorBox(
                    'Something went wrong',
                    '{}\nPlease try again later.'.format(Other_Errors))
                sys.exit(0)
            if self.message.empty():
                self.message.put('{0:<{width}}'.format(
                    'Checking the permission of {} ...'.format(Item),
                    width=self.max_message_length))
            if self.progress.empty():
                self.progress.put(_index)
            if len(_result_json['features']) > 0:
                # Add available assets to respective items
                Items_asset[Item] = _Items_assets[Item]

            # Wait for 1 second after each request to avoid exceeding the request limit
            sleep(1)

        if self.result.empty():
            self.result.put(Items_asset)
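Example No. 21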
def download_image():

    client = api.ClientV1(api_key=API_KEY)

    # defining the Area of Interest
    aoi = {
        "type": "Polygon",
        "coordinates": GEOMETRY,
    }

    # build a filter for the AOI
    query = api.filters.and_filter(
        api.filters.geom_filter(aoi),
        api.filters.range_filter('cloud_cover', gt=0),
        api.filters.range_filter('cloud_cover', lt=CLOUD_COVER))

    # we are requesting PlanetScope 3 Band imagery
    item_types = ['PSScene3Band']
    request = api.filters.build_search_request(query, item_types)

    # this will cause an exception if there are any API related errors
    results = client.quick_search(request)

    # creates the output directory if not already exists
    if not os.path.exists(SAVE_DIR):
        os.mkdir(SAVE_DIR)

    # items_iter returns an iterator over API response pages
    for item in results.items_iter(NUM_IMG):
        # each item is a GeoJSON feature
        print('downloading tile {}'.format(item['id']))
        assets = client.get_assets(item).get()
        activation = client.activate(assets['visual'])

        callback = api.write_to_file(directory=SAVE_DIR)
        body = client.download(assets['visual'], callback=callback)
        body.wait()
Example No. 22
        os.chdir(orig_path)
        break
    elif (os.path.basename(head) == "Coding Experiments"):
        os.chdir(os.path.join(head, main_folder))
        orig_path = os.getcwd()
        break
    else:
        current_path = head
        orig_path = current_path
print(orig_path)

get_ipython().run_line_magic('matplotlib', 'inline')
# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
api_keys = json.load(open("apikeys.json", 'r'))
client = api.ClientV1(api_key=api_keys["PLANET_API_KEY"])

save_path = os.path.join(orig_path, 'tiffFiles')
os.chdir(save_path)

# # Make a slippy map to get GeoJSON
#
# * The planet API allows you to query using a [geojson](https://en.wikipedia.org/wiki/GeoJSON) which is a special flavor of json.
# * We are going to create a slippy map using leaflet and apply the Planet 2017 Q1 mosaic as the basemap. This requires our api key.
# * We are going to add a special draw handler that shoves a draw region into a object so we get the geojson.
# * If you don't want to do this, or need a fixed query try [geojson.io](http://geojson.io/#map=2/20.0/0.0)
# * To install and run:
# ```
# $ pip install ipyleaflet
# $ jupyter nbextension enable --py --sys-prefix ipyleaflet
# $ jupyter nbextension enable --py --sys-prefix widgetsnbextension
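# A hedged sketch (not part of the original notebook) of the slippy-map step
# described above, using ipyleaflet. The mosaic name and tile URL template are
# assumptions; `api_keys` is the dict loaded from apikeys.json earlier.
from ipyleaflet import Map, TileLayer, DrawControl

m = Map(center=(37.77, -122.42), zoom=11)

# Planet basemap tiles (example mosaic name; substitute your own)
mosaic_url = ('https://tiles.planet.com/basemaps/v1/planet-tiles/'
              'global_quarterly_2017q1_mosaic/gmap/{z}/{x}/{y}.png'
              '?api_key=' + api_keys["PLANET_API_KEY"])
m.add_layer(TileLayer(url=mosaic_url))

# draw handler that stashes the drawn region as GeoJSON
drawn_geojson = {}
draw_control = DrawControl()

def handle_draw(self, action, geo_json):
    drawn_geojson['last'] = geo_json

draw_control.on_draw(handle_draw)
m.add_control(draw_control)
m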
Example No. 23
from sys import stdout

# GeoJSON AOI
# e.g. http://geojson.io
aoi = {
    "type":
    "Polygon",
    "coordinates": [[[-122.45593070983887, 37.76060492968732],
                     [-122.41996765136719, 37.76060492968732],
                     [-122.41996765136719, 37.80184969073113],
                     [-122.45593070983887, 37.80184969073113],
                     [-122.45593070983887, 37.76060492968732]]]
}

# Get API key
api_key = api.ClientV1().login('*****@*****.**', 'gertan20')

# Create client
client = api.ClientV1(api_key=api_key['api_key'])

# Build a query using the AOI and a cloud_cover filter
# that get images with lower than 10% cloud cover
# and acquired on Nov 1st, 2017
query = filters.and_filter(
    filters.geom_filter(aoi), filters.range_filter('cloud_cover', lt=0.1),
    filters.date_range('acquired',
                       gte='2017-11-01T00:00:00.000Z',
                       lte='2017-11-01T23:59:00.000Z'))

# Build a request for only PlanetScope imagery
# Item types:
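Example No. 24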
from planet import api
import sys
import os
import requests
import time

apikey = os.getenv('PLANET_API_KEY')

ddir = str(sys.argv[1])
sdate = str(sys.argv[2])
edate = str(sys.argv[3])
limit = int(sys.argv[4])

client = api.ClientV1(api_key=apikey)

####
## Define data specifications
####
# geometry
aoi = {
    "type":
    "Polygon",
    "coordinates": [[[-148.97048950195312, 70.14456261942247],
                     [-148.282470703125, 70.14456261942247],
                     [-148.282470703125, 70.41333338476161],
                     [-148.97048950195312, 70.41333338476161],
                     [-148.97048950195312, 70.14456261942247]]]
}

ubicacion = api.filters.geom_filter(aoi)
Example No. 25
from planet.api import filters
from planet.api.auth import find_api_key
from shapely.geometry import shape
from shapely.ops import transform
from datetime import datetime
from datetime import timezone
from time import mktime
from planet.api.utils import strp_lenient

try:
    PL_API_KEY = find_api_key()
except Exception as e:
    print("Failed to get Planet Key")
    sys.exit()

client = api.ClientV1(PL_API_KEY)

temp = {"coordinates": [], "type": "MultiPolygon"}
tempsingle = {"coordinates": [], "type": "Polygon"}
stbase = {"config": [], "field_name": [], "type": "StringInFilter"}
rbase = {
    "config": {
        "gte": [],
        "lte": []
    },
    "field_name": [],
    "type": "RangeFilter"
}


# Get time right
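# A hedged sketch (not from the original snippet) of what the "Get time right"
# step might look like: strp_lenient parses several common date formats, and
# the result is converted to an ISO 8601 UTC string for a date_range filter.
def to_utc_iso(datestring):
    parsed = strp_lenient(datestring)
    if parsed is None:
        raise ValueError('Could not parse date: %s' % datestring)
    epoch = mktime(parsed.timetuple())
    return datetime.fromtimestamp(epoch, tz=timezone.utc).isoformat()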
Example No. 26
# to planet imagery
#
# output a CSV of PlanetScope item-id, cloud cover, and date acquired
# for a region around the San Francisco peninsula

# geojson AOI
aoi = {
    "type":
    "Polygon",
    "coordinates": [[[-122.54, 37.81], [-122.38, 37.84], [-122.35, 37.71],
                     [-122.53, 37.70], [-122.54, 37.81]]]
}

# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
client = api.ClientV1()

# build a query using the AOI and
# a cloud_cover filter that excludes 'cloud free' scenes
query = filters.and_filter(
    filters.geom_filter(aoi),
    filters.range_filter('cloud_cover', gt=0),
)

# build a request for only PlanetScope imagery
request = filters.build_search_request(
    query, item_types=['PSScene3Band', 'PSScene4Band'])

# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
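# A hedged sketch of the CSV step the comment at the top describes (not part
# of the original snippet); 'cloud_cover' and 'acquired' are standard item
# properties in Planet Data API v1 responses.
import csv

with open('scenes.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['item_id', 'cloud_cover', 'acquired'])
    # items_iter pages through all matching items
    for item in result.items_iter(limit=None):
        props = item['properties']
        writer.writerow([item['id'], props['cloud_cover'], props['acquired']])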
Example No. 27
def clientv1():
    client_params.pop('analytics_base_url', None)
    return api.ClientV1(**client_params)
Example No. 28
def main():

    # Variables assigned from module options
    planet_api_key = options['api_key']
    item_type = options['item_type']
    input_name = options['input_name']
    output_name = options['output_name']
    filter_start_date = options['start_date_filter']
    filter_end_date = options['end_date_filter']
    cloud_cover = options['cloud_cover']
    gsd = options['gsd']
    sun_azimuth = options['sun_azimuth']
    sun_elevation = options['sun_elevation']
    view_angle = options['view_angle']

    # Set date range filters
    start_date_range_filter = api.filters.date_range('acquired',
                                                     gte=filter_start_date)
    end_date_range_filter = api.filters.date_range('acquired',
                                                   lte=filter_end_date)

    # Set cloud filter (Optional)
    cloud_cover_low, cloud_cover_high = cloud_cover
    cloud_cover_low_filter = api.filters.range_filter('cloud_cover',
                                                      gt=cloud_cover_low)
    cloud_cover_high_filter = api.filters.range_filter('cloud_cover',
                                                       lt=cloud_cover_high)

    # Set gsd filter NumberInFilter (Optional)

    # Set sun azimuth filter (Optional)

    # Set sun elevation filter (Optional)

    # Set view angle filter (Optional)

    # Set ground_control filter StringInFilter (String 'true', 'false')(Optional)

    # visible_percent RangeFilter (Int 0-100)

    # usable data RangeFilter (Double 0.0 - 1.0)
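    # Hedged sketch (not in the original module) of how the optional filters
    # listed above could be built with the v1 client helpers; the option
    # values are strings, so they are cast first, and these filters are only
    # illustrative (they are not added to request_filter below).
    if gsd:
        gsd_filter = api.filters.range_filter('gsd', gte=float(gsd))
    if sun_azimuth:
        sun_azimuth_filter = api.filters.range_filter('sun_azimuth', gte=float(sun_azimuth))
    if sun_elevation:
        sun_elevation_filter = api.filters.range_filter('sun_elevation', gte=float(sun_elevation))
    if view_angle:
        view_angle_filter = api.filters.range_filter('view_angle', lte=float(view_angle))
    ground_control_filter = api.filters.string_filter('ground_control', 'true')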

    # Set permissions filter to only return downloadable data
    permission_filter = api.filters.permission_filter('assets:download')

    request_filter = api.filters.and_filter(start_date_range_filter,
                                            end_date_range_filter,
                                            cloud_cover_low_filter,
                                            cloud_cover_high_filter,
                                            permission_filter)

    planet_query_filter = api.filters.build_search_request(request_filter,
                                                            [item_type])

    nav_string = usgs_product_dict[gui_product]
    product = nav_string['product']
    product_format = nav_string['format']
    product_extensions = tuple(nav_string['extension'].split(','))
    product_is_zip = nav_string['zip']
    product_srs = nav_string['srs']
    product_proj4 = nav_string['srs_proj4']
    product_interpolation = nav_string['interpolation']
    product_url_split = nav_string['url_split']
    product_extent = nav_string['extent']
    gui_subset = None

    #Set Planet API Key and client
    os.environ['PL_API_KEY'] = planet_api_key
    client = api.ClientV1()

    # Parameter assignments for each dataset
    if gui_product == 'ned':
        gui_dataset = options['ned_dataset']
        ned_api_name = ''
        if options['ned_dataset'] == 'ned1sec':
            ned_data_abbrv = 'ned_1arc_'
            ned_api_name = '1 arc-second'
        if options['ned_dataset'] == 'ned13sec':
            ned_data_abbrv = 'ned_13arc_'
            ned_api_name = '1/3 arc-second'
        if options['ned_dataset'] == 'ned19sec':
            ned_data_abbrv = 'ned_19arc_'
            ned_api_name = '1/9 arc-second'
        product_tag = product + " " + ned_api_name

    if gui_product == 'nlcd':
        gui_dataset = options['nlcd_dataset']
        if options['nlcd_dataset'] == 'nlcd2001':
            gui_dataset = 'National Land Cover Database (NLCD) - 2001'
        if options['nlcd_dataset'] == 'nlcd2006':
            gui_dataset = 'National Land Cover Database (NLCD) - 2006'
        if options['nlcd_dataset'] == 'nlcd2011':
            gui_dataset = 'National Land Cover Database (NLCD) - 2011'

        if options['nlcd_subset'] == 'landcover':
            gui_subset = 'Land Cover'
        if options['nlcd_subset'] == 'impervious':
            gui_subset = 'Percent Developed Imperviousness'
        if options['nlcd_subset'] == 'canopy':
            gui_subset = 'Percent Tree Canopy'
        product_tag = gui_dataset

    if gui_product == 'naip':
        gui_dataset = 'Imagery - 1 meter (NAIP)'
        product_tag = nav_string['product']

    has_pdal = gscript.find_program(pgm='v.in.pdal')
    if gui_product == 'lidar':
        gui_dataset = 'Lidar Point Cloud (LPC)'
        product_tag = nav_string['product']
        if not has_pdal:
            gscript.warning(
                _("Module v.in.pdal is missing,"
                  " any downloaded data will not be processed."))
    # Assigning further parameters from GUI
    gui_output_layer = options['output_name']
    gui_resampling_method = options['resampling_method']
    gui_i_flag = flags['i']
    gui_k_flag = flags['k']
    work_dir = options['output_directory']
    memory = options['memory']
    nprocs = options['nprocs']

    preserve_extracted_files = gui_k_flag
    use_existing_extracted_files = True
    preserve_imported_tiles = gui_k_flag
    use_existing_imported_tiles = True

    if not os.path.isdir(work_dir):
        gscript.fatal(
            _("Directory <{}> does not exist."
              " Please create it.").format(work_dir))

    # Returns current units
    try:
        proj = gscript.parse_command('g.proj', flags='g')
        if gscript.locn_is_latlong():
            product_resolution = nav_string['dataset'][gui_dataset][0]
        elif float(proj['meters']) == 1:
            product_resolution = nav_string['dataset'][gui_dataset][1]
        else:
            # we assume feet
            product_resolution = nav_string['dataset'][gui_dataset][2]
    except TypeError:
        product_resolution = False
    if gui_product == 'lidar' and options['resolution']:
        product_resolution = float(options['resolution'])

    if gui_resampling_method == 'default':
        gui_resampling_method = nav_string['interpolation']
        gscript.verbose(
            _("The default resampling method for product {product} is {res}").
            format(product=gui_product, res=product_interpolation))

    # Get coordinates for current GRASS computational region and convert to USGS SRS
    gregion = gscript.region()
    wgs84 = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'
    min_coords = gscript.read_command('m.proj',
                                      coordinates=(gregion['w'], gregion['s']),
                                      proj_out=wgs84,
                                      separator='comma',
                                      flags='d')
    max_coords = gscript.read_command('m.proj',
                                      coordinates=(gregion['e'], gregion['n']),
                                      proj_out=wgs84,
                                      separator='comma',
                                      flags='d')
    min_list = min_coords.split(',')[:2]
    max_list = max_coords.split(',')[:2]
    list_bbox = min_list + max_list
    str_bbox = ",".join((str(coord) for coord in list_bbox))

    # Format variables for TNM API call
    gui_prod_str = str(product_tag)
    datasets = quote_plus(gui_prod_str)
    prod_format = quote_plus(product_format)
    prod_extent = quote_plus(product_extent[0])

    # Create TNM API URL
    base_TNM = "https://viewer.nationalmap.gov/tnmaccess/api/products?"
    datasets_TNM = "datasets={0}".format(datasets)
    bbox_TNM = "&bbox={0}".format(str_bbox)
    prod_format_TNM = "&prodFormats={0}".format(prod_format)
    TNM_API_URL = base_TNM + datasets_TNM + bbox_TNM + prod_format_TNM
    if gui_product == 'nlcd':
        TNM_API_URL += "&prodExtents={0}".format(prod_extent)
    gscript.verbose("TNM API Query URL:\t{0}".format(TNM_API_URL))

    # Query TNM API
    try_again_message = _(
        "Possibly, the query has timed out. Check network configuration and try again."
    )
    try:
        TNM_API_GET = urlopen(TNM_API_URL, timeout=12)
    except HTTPError as error:
        gscript.fatal(
            _("HTTP(S) error from USGS TNM API:"
              " {code}: {reason} ({instructions})").format(
                  reason=error.reason,
                  code=error.code,
                  instructions=try_again_message))
    except (URLError, OSError, IOError) as error:
        # Catching also SSLError and potentially others which are
        # subclasses of IOError in Python 2 and of OSError in Python 3.
        gscript.fatal(
            _("Error accessing USGS TNM API: {error} ({instructions})").format(
                error=error, instructions=try_again_message))

    # Parse return JSON object from API query
    try:
        return_JSON = json.load(TNM_API_GET)
        if return_JSON['errors']:
            TNM_API_error = return_JSON['errors']
            api_error_msg = "TNM API Error - {0}".format(str(TNM_API_error))
            gscript.fatal(api_error_msg)
        if gui_product == 'lidar' and options['title_filter']:
            return_JSON['items'] = [
                item for item in return_JSON['items']
                if options['title_filter'] in item['title']
            ]
            return_JSON['total'] = len(return_JSON['items'])

    except:
        gscript.fatal(_("Unable to load USGS JSON object."))

    # Functions down_list() and exist_list() used to determine
    # existing files and those that need to be downloaded.
    def down_list():
        dwnld_url.append(TNM_file_URL)
        dwnld_size.append(TNM_file_size)
        TNM_file_titles.append(TNM_file_title)
        if product_is_zip:
            extract_zip_list.append(local_zip_path)
        if f['datasets'][0] not in dataset_name:
            if len(dataset_name) <= 1:
                dataset_name.append(str(f['datasets'][0]))

    def exist_list():
        exist_TNM_titles.append(TNM_file_title)
        exist_dwnld_url.append(TNM_file_URL)
        if product_is_zip:
            exist_zip_list.append(local_zip_path)
            extract_zip_list.append(local_zip_path)
        else:
            exist_tile_list.append(local_tile_path)

    # Assign needed parameters from returned JSON
    tile_API_count = int(return_JSON['total'])
    tiles_needed_count = 0
    size_diff_tolerance = 5
    exist_dwnld_size = 0
    if tile_API_count > 0:
        dwnld_size = []
        dwnld_url = []
        dataset_name = []
        TNM_file_titles = []
        exist_dwnld_url = []
        exist_TNM_titles = []
        exist_zip_list = []
        exist_tile_list = []
        extract_zip_list = []
        # for each file returned, assign variables to needed parameters
        for f in return_JSON['items']:
            TNM_file_title = f['title']
            TNM_file_URL = str(f['downloadURL'])
            TNM_file_size = int(f['sizeInBytes'])
            TNM_file_name = TNM_file_URL.split(product_url_split)[-1]
            if gui_product == 'ned':
                local_file_path = os.path.join(work_dir,
                                               ned_data_abbrv + TNM_file_name)
                local_zip_path = os.path.join(work_dir,
                                              ned_data_abbrv + TNM_file_name)
                local_tile_path = os.path.join(work_dir,
                                               ned_data_abbrv + TNM_file_name)
            else:
                local_file_path = os.path.join(work_dir, TNM_file_name)
                local_zip_path = os.path.join(work_dir, TNM_file_name)
                local_tile_path = os.path.join(work_dir, TNM_file_name)
            file_exists = os.path.exists(local_file_path)
            file_complete = None
            # if file exists, but is incomplete, remove file and redownload
            if file_exists:
                existing_local_file_size = os.path.getsize(local_file_path)
                # if local file is incomplete
                if abs(existing_local_file_size -
                       TNM_file_size) > size_diff_tolerance:
                    # add file to cleanup list
                    cleanup_list.append(local_file_path)
                    # NLCD API query returns subsets that cannot be filtered before
                    # results are returned. gui_subset is used to filter results.
                    if not gui_subset:
                        tiles_needed_count += 1
                        down_list()
                    else:
                        if gui_subset in TNM_file_title:
                            tiles_needed_count += 1
                            down_list()
                        else:
                            continue
                else:
                    if not gui_subset:
                        tiles_needed_count += 1
                        exist_list()
                        exist_dwnld_size += TNM_file_size
                    else:
                        if gui_subset in TNM_file_title:
                            tiles_needed_count += 1
                            exist_list()
                            exist_dwnld_size += TNM_file_size
                        else:
                            continue
            else:
                if not gui_subset:
                    tiles_needed_count += 1
                    down_list()
                else:
                    if gui_subset in TNM_file_title:
                        tiles_needed_count += 1
                        down_list()
                        continue

    # return fatal error if API query returns no results for GUI input
    elif tile_API_count == 0:
        gscript.fatal(
            _("TNM API ERROR or Zero tiles available for given input parameters."
              ))

    # number of files to be downloaded
    file_download_count = len(dwnld_url)

    # remove existing files from download lists
    for t in exist_TNM_titles:
        if t in TNM_file_titles:
            TNM_file_titles.remove(t)
    for url in exist_dwnld_url:
        if url in dwnld_url:
            dwnld_url.remove(url)

    # messages to user about status of files to be kept, removed, or downloaded
    if exist_zip_list:
        exist_msg = _(
            "\n{0} of {1} files/archive(s) exist locally and will be used by module."
        ).format(len(exist_zip_list), tiles_needed_count)
        gscript.message(exist_msg)
    # TODO: fix this way of reporting and merge it with the one in use
    if exist_tile_list:
        exist_msg = _(
            "\n{0} of {1} files/archive(s) exist locally and will be used by module."
        ).format(len(exist_tile_list), tiles_needed_count)
        gscript.message(exist_msg)
    # TODO: simply continue with whatever is needed to be done in this case
    if cleanup_list:
        cleanup_msg = _(
            "\n{0} existing incomplete file(s) detected and removed. Run module again."
        ).format(len(cleanup_list))
        gscript.fatal(cleanup_msg)

    # formats JSON size from bytes into needed units for combined file size
    if dwnld_size:
        total_size = sum(dwnld_size)
        len_total_size = len(str(total_size))
        if 6 < len_total_size < 10:
            total_size_float = total_size * 1e-6
            total_size_str = str("{0:.2f}".format(total_size_float) + " MB")
        elif len_total_size >= 10:
            total_size_float = total_size * 1e-9
            total_size_str = str("{0:.2f}".format(total_size_float) + " GB")
        else:
            total_size_str = "{0} bytes".format(total_size)
    else:
        total_size_str = '0'

    # Prints 'none' if all tiles available locally
    if TNM_file_titles:
        TNM_file_titles_info = "\n".join(TNM_file_titles)
    else:
        TNM_file_titles_info = 'none'

    # Formatted return for 'i' flag
    if file_download_count <= 0:
        data_info = "USGS file(s) to download: NONE"
        if gui_product == 'nlcd':
            if tile_API_count != file_download_count:
                if tiles_needed_count == 0:
                    nlcd_unavailable = "NLCD {0} data unavailable for input parameters".format(
                        gui_subset)
                    gscript.fatal(nlcd_unavailable)
    else:
        data_info = (
            "USGS file(s) to download:",
            "-------------------------",
            "Total download size:\t{size}",
            "Tile count:\t{count}",
            "USGS SRS:\t{srs}",
            "USGS tile titles:\n{tile}",
            "-------------------------",
        )
        data_info = '\n'.join(data_info).format(size=total_size_str,
                                                count=file_download_count,
                                                srs=product_srs,
                                                tile=TNM_file_titles_info)
    print(data_info)

    if gui_i_flag:
        gscript.info(
            _("To download USGS data, remove <i> flag, and rerun r.in.usgs."))
        sys.exit()

    # USGS data download process
    if file_download_count <= 0:
        gscript.message(_("Extracting existing USGS Data..."))
    else:
        gscript.message(_("Downloading USGS Data..."))

    TNM_count = len(dwnld_url)
    download_count = 0
    local_tile_path_list = []
    local_zip_path_list = []
    patch_names = []

    # Download files
    for url in dwnld_url:
        # create file name by splitting name from returned url
        # add file name to local download directory
        if gui_product == 'ned':
            file_name = ned_data_abbrv + url.split(product_url_split)[-1]
            local_file_path = os.path.join(work_dir, file_name)
        else:
            file_name = url.split(product_url_split)[-1]
            local_file_path = os.path.join(work_dir, file_name)
        try:
            # download files in chunks rather than write complete files to memory
            dwnld_req = urlopen(url, timeout=12)
            download_bytes = int(dwnld_req.info()['Content-Length'])
            CHUNK = 16 * 1024
            with open(local_file_path, "wb+") as local_file:
                count = 0
                steps = int(download_bytes / CHUNK) + 1
                while True:
                    chunk = dwnld_req.read(CHUNK)
                    gscript.percent(count, steps, 10)
                    count += 1
                    if not chunk:
                        break
                    local_file.write(chunk)
                gscript.percent(1, 1, 1)
            local_file.close()
            download_count += 1
            # determine if file is a zip archive or another format
            if product_is_zip:
                local_zip_path_list.append(local_file_path)
            else:
                local_tile_path_list.append(local_file_path)
            file_complete = "Download {0} of {1}: COMPLETE".format(
                download_count, TNM_count)
            gscript.info(file_complete)
        except URLError:
            gscript.fatal(
                _("USGS download request has timed out. Network or formatting error."
                  ))
        except Exception:
            cleanup_list.append(local_file_path)
            if download_count:
                file_failed = "Download {0} of {1}: FAILED".format(
                    download_count, TNM_count)
                gscript.fatal(file_failed)

    # sets already downloaded zip files or tiles to be extracted or imported
    # pre-extraction stats are unreliable, so stats are collected during extraction
    used_existing_extracted_tiles_num = 0
    removed_extracted_tiles_num = 0
    old_extracted_tiles_num = 0
    extracted_tiles_num = 0
    if exist_zip_list:
        for z in exist_zip_list:
            local_zip_path_list.append(z)
    if exist_tile_list:
        for t in exist_tile_list:
            local_tile_path_list.append(t)
    if product_is_zip:
        if file_download_count == 0:
            pass
        else:
            gscript.message("Extracting data...")
        # for each zip archive, extract needed file
        files_to_process = len(local_zip_path_list)
        for i, z in enumerate(local_zip_path_list):
            # TODO: measure only for the files being unzipped
            gscript.percent(i, files_to_process, 10)
            # Extract tiles from ZIP archives
            try:
                with zipfile.ZipFile(z, "r") as read_zip:
                    for f in read_zip.namelist():
                        if f.lower().endswith(product_extensions):
                            extracted_tile = os.path.join(work_dir, str(f))
                            remove_and_extract = True
                            if os.path.exists(extracted_tile):
                                if use_existing_extracted_files:
                                    # if the downloaded file is newer
                                    # than the extracted one, we extract
                                    if os.path.getmtime(
                                            extracted_tile) < os.path.getmtime(
                                                z):
                                        remove_and_extract = True
                                        old_extracted_tiles_num += 1
                                    else:
                                        remove_and_extract = False
                                        used_existing_extracted_tiles_num += 1
                                else:
                                    remove_and_extract = True
                                if remove_and_extract:
                                    removed_extracted_tiles_num += 1
                                    os.remove(extracted_tile)
                            if remove_and_extract:
                                extracted_tiles_num += 1
                                read_zip.extract(f, work_dir)
                if os.path.exists(extracted_tile):
                    local_tile_path_list.append(extracted_tile)
                    if not preserve_extracted_files:
                        cleanup_list.append(extracted_tile)
            except IOError as error:
                cleanup_list.append(extracted_tile)
                gscript.fatal(
                    _("Unable to locate or extract IMG file '{filename}'"
                      " from ZIP archive '{zipname}': {error}").format(
                          filename=extracted_tile, zipname=z, error=error))
        gscript.percent(1, 1, 1)
        # TODO: do this before the extraction begins
        gscript.verbose(
            _("Extracted {extracted} new tiles and"
              " used {used} existing tiles").format(
                  used=used_existing_extracted_tiles_num,
                  extracted=extracted_tiles_num))
        if old_extracted_tiles_num:
            gscript.verbose(
                _("Found {removed} existing tiles older"
                  " than the corresponding downloaded archive").format(
                      removed=old_extracted_tiles_num))
        if removed_extracted_tiles_num:
            gscript.verbose(
                _("Removed {removed} existing tiles").format(
                    removed=removed_extracted_tiles_num))

    if gui_product == 'lidar' and not has_pdal:
        gscript.fatal(
            _("Module v.in.pdal is missing,"
              " cannot process downloaded data."))

    # operations for extracted or complete files available locally
    # We are looking only for the existing maps in the current mapset,
    # but theoretically we could be getting them from other mapsets
    # on search path or from the whole location. User may also want to
    # store the individual tiles in a separate mapset.
    # The big assumption here is naming of the maps (it is a smaller concern
    # for the files in a dedicated download directory).
    used_existing_imported_tiles_num = 0
    imported_tiles_num = 0
    mapset = get_current_mapset()
    files_to_import = len(local_tile_path_list)

    def run_file_import(identifier, results, input, output, resolution,
                        resolution_value, extent, resample, memory):
        result = {}
        try:
            gscript.run_command('r.import',
                                input=input,
                                output=output,
                                resolution=resolution,
                                resolution_value=resolution_value,
                                extent=extent,
                                resample=resample,
                                memory=memory)
        except CalledModuleError:
            error = ("Unable to import <{0}>").format(output)
            result["errors"] = error
        else:
            result["output"] = output
        results[identifier] = result

    def run_lidar_import(identifier, results, input, output, input_srs=None):
        result = {}
        params = {}
        if input_srs:
            params['input_srs'] = input_srs
        try:
            gscript.run_command('v.in.pdal',
                                input=input,
                                output=output,
                                flags='wr',
                                **params)
        except CalledModuleError:
            error = ("Unable to import <{0}>").format(output)
            result["errors"] = error
        else:
            result["output"] = output
        results[identifier] = result

    process_list = []
    process_id_list = []
    process_count = 0
    num_tiles = len(local_tile_path_list)

    with Manager() as manager:
        results = manager.dict()
        for i, t in enumerate(local_tile_path_list):
            # create variables for use in GRASS GIS import process
            LT_file_name = os.path.basename(t)
            LT_layer_name = os.path.splitext(LT_file_name)[0]
            # we are removing the files if requested even if we don't use them
            # do not remove by default with NAIP, as there are no zip files
            if gui_product != 'naip' and not preserve_extracted_files:
                cleanup_list.append(t)
            # TODO: unlike the files, we don't compare date with input
            if use_existing_imported_tiles and map_exists(
                    "raster", LT_layer_name, mapset):
                patch_names.append(LT_layer_name)
                used_existing_imported_tiles_num += 1
            else:
                in_info = _("Importing and reprojecting {name}"
                            " ({count} out of {total})...").format(
                                name=LT_file_name,
                                count=i + 1,
                                total=files_to_import)
                gscript.info(in_info)

                process_count += 1
                if gui_product != 'lidar':
                    process = Process(
                        name="Import-{}-{}-{}".format(process_count, i,
                                                      LT_layer_name),
                        target=run_file_import,
                        kwargs=dict(identifier=i,
                                    results=results,
                                    input=t,
                                    output=LT_layer_name,
                                    resolution='value',
                                    resolution_value=product_resolution,
                                    extent="region",
                                    resample=product_interpolation,
                                    memory=memory))
                else:
                    srs = options['input_srs']
                    process = Process(
                        name="Import-{}-{}-{}".format(process_count, i,
                                                      LT_layer_name),
                        target=run_lidar_import,
                        kwargs=dict(identifier=i,
                                    results=results,
                                    input=t,
                                    output=LT_layer_name,
                                    input_srs=srs if srs else None))
                process.start()
                process_list.append(process)
                process_id_list.append(i)

            # Wait for processes to finish when we reached the max number
            # of processes.
            if process_count == nprocs or i == num_tiles - 1:
                exitcodes = 0
                for process in process_list:
                    process.join()
                    exitcodes += process.exitcode
                if exitcodes != 0:
                    if nprocs > 1:
                        gscript.fatal(
                            _("Parallel import and reprojection failed."
                              " Try running with nprocs=1."))
                    else:
                        gscript.fatal(
                            _("Import and reprojection step failed."))
                for identifier in process_id_list:
                    if "errors" in results[identifier]:
                        gscript.warning(results[identifier]["errors"])
                    else:
                        patch_names.append(results[identifier]["output"])
                        imported_tiles_num += 1
                # Empty the process list
                process_list = []
                process_id_list = []
                process_count = 0
        # no process should be left now
        assert not process_list
        assert not process_id_list
        assert not process_count

    gscript.verbose(
        _("Imported {imported} new tiles and"
          " used {used} existing tiles").format(
              used=used_existing_imported_tiles_num,
              imported=imported_tiles_num))

    # if control variables match and multiple files need to be patched,
    # check product resolution, run r.patch

    # v.surf.rst lidar params
    rst_params = dict(tension=25, smooth=0.1, npmin=100)

    # Check that downloaded files match expected count
    completed_tiles_count = len(local_tile_path_list)
    if completed_tiles_count == tiles_needed_count:
        if len(patch_names) > 1:
            try:
                gscript.use_temp_region()
                # set the resolution
                if product_resolution:
                    gscript.run_command('g.region',
                                        res=product_resolution,
                                        flags='a')
                if gui_product == 'naip':
                    for i in ('1', '2', '3', '4'):
                        patch_names_i = [
                            name + '.' + i for name in patch_names
                        ]
                        output = gui_output_layer + '.' + i
                        gscript.run_command('r.patch',
                                            input=patch_names_i,
                                            output=output)
                        gscript.raster_history(output)
                elif gui_product == 'lidar':
                    gscript.run_command('v.patch',
                                        flags='nzb',
                                        input=patch_names,
                                        output=gui_output_layer)
                    gscript.run_command('v.surf.rst',
                                        input=gui_output_layer,
                                        elevation=gui_output_layer,
                                        nprocs=nprocs,
                                        **rst_params)
                else:
                    gscript.run_command('r.patch',
                                        input=patch_names,
                                        output=gui_output_layer)
                    gscript.raster_history(gui_output_layer)
                gscript.del_temp_region()
                out_info = ("Patched composite layer '{0}' added"
                            ).format(gui_output_layer)
                gscript.verbose(out_info)
                # Remove files if not -k flag
                if not preserve_imported_tiles:
                    if gui_product == 'naip':
                        for i in ('1', '2', '3', '4'):
                            patch_names_i = [
                                name + '.' + i for name in patch_names
                            ]
                            gscript.run_command('g.remove',
                                                type='raster',
                                                name=patch_names_i,
                                                flags='f')
                    elif gui_product == 'lidar':
                        gscript.run_command('g.remove',
                                            type='vector',
                                            name=patch_names +
                                            [gui_output_layer],
                                            flags='f')
                    else:
                        gscript.run_command('g.remove',
                                            type='raster',
                                            name=patch_names,
                                            flags='f')
            except CalledModuleError:
                gscript.fatal("Unable to patch tiles.")
            temp_down_count = _(
                "{0} of {1} tiles successfully imported and patched").format(
                    completed_tiles_count, tiles_needed_count)
            gscript.info(temp_down_count)
        elif len(patch_names) == 1:
            if gui_product == 'naip':
                for i in ('1', '2', '3', '4'):
                    gscript.run_command('g.rename',
                                        raster=(patch_names[0] + '.' + i,
                                                gui_output_layer + '.' + i))
            elif gui_product == 'lidar':
                gscript.run_command('v.surf.rst',
                                    input=patch_names[0],
                                    elevation=gui_output_layer,
                                    nprocs=nprocs,
                                    **rst_params)
                if not preserve_imported_tiles:
                    gscript.run_command('g.remove',
                                        type='vector',
                                        name=patch_names[0],
                                        flags='f')
            else:
                gscript.run_command('g.rename',
                                    raster=(patch_names[0], gui_output_layer))
            temp_down_count = _("Tile successfully imported")
            gscript.info(temp_down_count)
        else:
            gscript.fatal(
                _("No tiles imported successfully. Nothing to patch."))
    else:
        gscript.fatal(
            _("Error in getting or importing the data (see above). Please retry."
              ))

    # Keep source files if 'k' flag active
    if gui_k_flag:
        src_msg = (
            "<k> flag selected: Source tiles remain in '{0}'").format(work_dir)
        gscript.info(src_msg)

    # set appropriate color table
    if gui_product == 'ned':
        gscript.run_command('r.colors',
                            map=gui_output_layer,
                            color='elevation')

    # composite NAIP
    if gui_product == 'naip':
        gscript.use_temp_region()
        gscript.run_command('g.region', raster=gui_output_layer + '.1')
        gscript.run_command('r.composite',
                            red=gui_output_layer + '.1',
                            green=gui_output_layer + '.2',
                            blue=gui_output_layer + '.3',
                            output=gui_output_layer)
        gscript.raster_history(gui_output_layer)
        gscript.del_temp_region()
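
The listing above relies on two helper functions, get_current_mapset() and map_exists(), that are defined elsewhere in the module. A minimal sketch of how they could be implemented with the GRASS GIS scripting API (an assumption for illustration, not necessarily the module's verbatim code):

# Hedged sketch: possible implementations of the helpers referenced above.
import grass.script as gscript


def get_current_mapset():
    """Return the name of the current mapset (as printed by 'g.mapset -p')."""
    return gscript.read_command('g.mapset', flags='p').strip()


def map_exists(element, name, mapset):
    """Return True if a map of the given element type exists in the mapset."""
    # g.findfile expects the element name 'cell' for raster maps
    if element == 'raster':
        element = 'cell'
    info = gscript.find_file(name, element=element, mapset=mapset)
    return bool(info['file'])
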
Exemplo n.º 29
0
def get_client():
    client = api.ClientV1()
    return client
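
When no key is passed, ClientV1 falls back to the PL_API_KEY environment variable, so get_client() works as long as that variable is set. A short usage sketch, passing the key explicitly instead:

import os
from planet import api

# Equivalent to api.ClientV1() when PL_API_KEY is set in the environment
client = api.ClientV1(api_key=os.getenv('PL_API_KEY'))
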
Exemplo n.º 30
0
    con=engine_output)
#ais_gbdx_overlap["time_diff"] = (abs(ais_gbdx_overlap.timestamp - ais_gbdx_overlap.timestamps)).astype('timedelta64[s]')

# ### Planet API call and image retrieval
# Set up objects to make the download request (clipping images!)

# Set Scene ID list
scene_id = ts_overlap_planet['id'].tolist()

# Set Item Type list
item_type = ts_overlap_planet['item_type'].tolist()
asset_type = 'visual'

#Call API
from planet import api
client = api.ClientV1(os.getenv('PLANET_API_KEY'))

# Activation requests (tell Planet to get the images ready for downloading)
assets_list = []
for ids, item in zip(scene_id, item_type):
    assets_list.append(client.get_assets_by_id(id=ids, item_type=item).get())

# Activate the assets and get the status (204 is the success code ;])
activation_list = [
    client.activate(x['analytic']) for x in assets_list
    if 'analytic' in x
]
for activation in activation_list:
    print(activation.response.status_code)
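
Activation is asynchronous: the calls above only queue the assets, and each one becomes downloadable once Planet reports its status as 'active'. A minimal polling sketch for that step (the 'analytic' asset key and the 30-second interval are assumptions):

import time

# Poll each asset until its status turns 'active'; activation can take several minutes.
for ids, item in zip(scene_id, item_type):
    assets = client.get_assets_by_id(id=ids, item_type=item).get()
    if 'analytic' not in assets:
        continue  # no analytic asset available for this item
    while assets['analytic']['status'] != 'active':
        time.sleep(30)
        assets = client.get_assets_by_id(id=ids, item_type=item).get()
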

#Download images! (mnt/data)