def get_images_by_catid(self, catid):
    """Retrieves the IDAHO image records associated with a given catid.

    Args:
        catid (str): The source catalog ID from the platform catalog.

    Returns:
        results (json): The full catalog-search response for IDAHO images
            within the catid.
    """
    self.logger.debug('Retrieving IDAHO metadata')

    # get the footprint of the catid's strip
    footprint = self.catalog.get_strip_footprint_wkt(catid)

    # try to convert from multipolygon to polygon:
    try:
        footprint = from_wkt(footprint).geoms[0].wkt
    except Exception:
        pass

    if not footprint:
        self.logger.debug("Cannot get IDAHO metadata for strip %s, footprint not found" % catid)
        return None

    return self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=footprint)
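# Usage sketch (hedged): an example of calling this method, assuming it is
# exposed on a gbdxtools-style client as `gbdx.idaho` and that the catalog ID
# below is a placeholder, not a real product.
#
#     from gbdxtools import Interface
#     gbdx = Interface()
#     idaho_images = gbdx.idaho.get_images_by_catid('10400100203F1300')
#     if idaho_images is None:
#         print('No footprint found for that catalog ID')
#     # otherwise idaho_images is the raw catalog-search response for the strip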
def query(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
    '''
    Perform a vector services query using the QUERY API
    (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)

    Args:
        searchAreaWkt: WKT Polygon of area to search
        query: Elastic Search query
        count: Maximum number of results to return
        ttl: Amount of time for each temporary vector page to exist
        index: Name of the vector index to query (defaults to default_index)

    Returns:
        List of vector results
    '''
    if count < 1000:
        # issue a single page query
        search_area_polygon = from_wkt(searchAreaWkt)
        left, lower, right, upper = search_area_polygon.bounds

        params = {
            "q": query,
            "count": min(count, 1000),
            "left": left,
            "right": right,
            "lower": lower,
            "upper": upper
        }

        url = self.query_index_url % index if index else self.query_url
        r = self.gbdx_connection.get(url, params=params)
        r.raise_for_status()
        return r.json()
    else:
        return list(self.query_iteratively(searchAreaWkt, query, count, ttl, index))
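# Usage sketch (hedged): assuming the Vectors interface is reachable as
# `gbdx.vectors`; the AOI and the Elasticsearch query string are placeholders.
#
#     aoi = ("POLYGON ((-105.35 39.48, -105.35 40.07, -104.47 40.07, "
#            "-104.47 39.48, -105.35 39.48))")
#     results = gbdx.vectors.query(aoi, query="item_type:WV02", count=150)
#     # With count < 1000 this issues a single paged request and returns the
#     # decoded JSON; larger counts delegate to query_iteratively() and the
#     # generator is materialized into a list.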
def create_leaflet_viewer(self, idaho_image_results, filename):
    """Create a leaflet viewer html file for viewing idaho images.

    Args:
        idaho_image_results (dict): IDAHO image result set as returned from
            the catalog.
        filename (str): Where to save output html file.
    """
    description = self.describe_images(idaho_image_results)
    if len(description) > 0:
        functionstring = ''
        for catid, images in description.items():
            for partnum, part in images['parts'].items():

                num_images = len(list(part.keys()))
                partname = None
                if num_images == 1:
                    # there is only one image, use the PAN
                    partname = [p for p in list(part.keys())][0]
                    pan_image_id = ''
                elif num_images == 2:
                    # there are two images in this part, use the multi (or pansharpen)
                    partname = [p for p in list(part.keys()) if p != 'PAN'][0]
                    pan_image_id = part['PAN']['id']

                if not partname:
                    self.logger.debug("Cannot find part for idaho image.")
                    continue

                bandstr = {
                    'RGBN': '0,1,2',
                    'WORLDVIEW_8_BAND': '4,2,1',
                    'PAN': '0'
                }.get(partname, '0,1,2')

                part_boundstr_wkt = part[partname]['boundstr']
                part_polygon = from_wkt(part_boundstr_wkt)
                bucketname = part[partname]['bucket']
                image_id = part[partname]['id']
                W, S, E, N = part_polygon.bounds

                functionstring += "addLayerToMap('%s','%s',%s,%s,%s,%s,'%s');\n" % (
                    bucketname, image_id, W, S, E, N, pan_image_id)

        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        try:
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read().decode("utf8")
        except AttributeError:
            # Python 3: read() already returns str and has no decode()
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read()

        data = data.replace('FUNCTIONSTRING', functionstring)
        data = data.replace('CENTERLAT', str(S))
        data = data.replace('CENTERLON', str(W))
        data = data.replace('BANDS', bandstr)
        data = data.replace('TOKEN', self.gbdx_connection.access_token)

        with codecs.open(filename, 'w', 'utf8') as outputfile:
            self.logger.debug("Saving %s" % filename)
            outputfile.write(data)
    else:
        print('No items returned.')
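# Usage sketch (hedged): chaining the IDAHO search above into the viewer;
# 'viewer.html' is an arbitrary output path and the catalog ID is a placeholder.
#
#     idaho_images = gbdx.idaho.get_images_by_catid('10400100203F1300')
#     gbdx.idaho.create_leaflet_viewer(idaho_images, 'viewer.html')
#     # Writes a standalone HTML page built from leafletmap_template.html with
#     # the FUNCTIONSTRING, CENTERLAT, CENTERLON, BANDS and TOKEN placeholders
#     # filled in from the described image parts.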
def get_tms_layers(self, catid, bands='4,2,1', gamma=1.3, highcutoff=0.98,
                   lowcutoff=0.02, brightness=1.0, contrast=1.0):
    """Get list of urls and bounding boxes corresponding to idaho images for
       a given catalog id.

    Args:
        catid (str): Catalog id
        bands (str): Bands to display, separated by commas (0-7).
        gamma (float): gamma coefficient. This is for on-the-fly pansharpening.
        highcutoff (float): High cut off coefficient (0.0 to 1.0). This is for
            on-the-fly pansharpening.
        lowcutoff (float): Low cut off coefficient (0.0 to 1.0). This is for
            on-the-fly pansharpening.
        brightness (float): Brightness coefficient (0.0 to 1.0). This is for
            on-the-fly pansharpening.
        contrast (float): Contrast coefficient (0.0 to 1.0). This is for
            on-the-fly pansharpening.

    Returns:
        urls (list): TMS urls.
        bboxes (list of tuples): Each tuple is (W, S, E, N) where (W, S, E, N)
            are the bounds of the corresponding idaho part.
    """
    description = self.describe_images(self.get_images_by_catid(catid))
    service_url = 'http://idaho.geobigdata.io/v1/tile/'

    urls, bboxes = [], []
    for catid, images in description.items():
        for partnum, part in images['parts'].items():
            pan_id, ms_id, ms_partname = None, None, None
            if 'PAN' in part.keys():
                pan_id = part['PAN']['id']
            if 'WORLDVIEW_8_BAND' in part.keys():
                ms_id = part['WORLDVIEW_8_BAND']['id']
                ms_partname = 'WORLDVIEW_8_BAND'
            elif 'RGBN' in part.keys():
                ms_id = part['RGBN']['id']
                ms_partname = 'RGBN'

            if ms_id:
                if pan_id:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands + '&panId=' + pan_id
                else:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands
                bbox = from_wkt(part[ms_partname]['boundstr']).bounds
            elif not ms_id and pan_id:
                band_str = pan_id + '/{z}/{x}/{y}?bands=0'
                bbox = from_wkt(part['PAN']['boundstr']).bounds
            else:
                continue

            bboxes.append(bbox)

            # Get the bucket. It has to be the same for all entries in the part.
            bucket = part[list(part.keys())[0]]['bucket']

            # Get the token
            token = self.gbdx_connection.access_token

            # Assemble url
            query_str = '&gamma={}&highCutoff={}&lowCutoff={}&brightness={}&contrast={}&token={}'.format(
                gamma, highcutoff, lowcutoff, brightness, contrast, token)
            url = service_url + bucket + '/' + band_str + query_str
            urls.append(url)

    return urls, bboxes
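# Usage sketch (hedged): rendering the first returned TMS layer with folium,
# which is an assumed extra dependency, not something this module requires;
# the catalog ID is a placeholder.
#
#     urls, bboxes = gbdx.idaho.get_tms_layers('10400100203F1300')
#     import folium
#     W, S, E, N = bboxes[0]
#     m = folium.Map(location=[(S + N) / 2, (W + E) / 2], zoom_start=12)
#     folium.TileLayer(tiles=urls[0], attr='DigitalGlobe').add_to(m)
#     m.save('tms_layer.html')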
def query_iteratively(self, searchAreaWkt, query, count=100, ttl='5m', index=default_index):
    '''
    Perform a vector services query using the QUERY API
    (https://gbdxdocs.digitalglobe.com/docs/vs-query-list-vector-items-returns-default-fields)

    Args:
        searchAreaWkt: WKT Polygon of area to search
        query: Elastic Search query
        count: Maximum number of results to return
        ttl: Amount of time for each temporary vector page to exist
        index: Name of the vector index to query (defaults to default_index)

    Returns:
        generator of vector results
    '''
    search_area_polygon = from_wkt(searchAreaWkt)
    left, lower, right, upper = search_area_polygon.bounds

    params = {
        "q": query,
        "count": min(count, 1000),
        "ttl": ttl,
        "left": left,
        "right": right,
        "lower": lower,
        "upper": upper
    }

    # initialize paging request
    url = self.query_index_page_url % index if index else self.query_page_url
    r = self.gbdx_connection.get(url, params=params)
    r.raise_for_status()
    page = r.json()
    paging_id = page['next_paging_id']
    item_count = int(page['item_count'])
    data = page['data']

    num_results = 0
    for vector in data:
        num_results += 1
        if num_results > count:
            break
        yield vector

    if num_results == count:
        return

    # get vectors from each page
    while paging_id and item_count > 0 and num_results < count:

        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        data = {"pagingId": paging_id, "ttl": ttl}

        r = self.gbdx_connection.post(self.page_url, headers=headers, data=data)
        r.raise_for_status()
        page = r.json()
        paging_id = page['next_paging_id']
        item_count = int(page['item_count'])
        data = page['data']

        for vector in data:
            num_results += 1
            if num_results > count:
                break
            yield vector
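# Usage sketch (hedged): streaming a large result set; the query string and
# count are placeholders, and process() stands in for whatever handles each
# vector.
#
#     for vector in gbdx.vectors.query_iteratively(aoi, "item_type:WV02", count=5000):
#         process(vector)  # hypothetical per-item callback
#     # Results are fetched page-by-page through the paging endpoint, so only
#     # one page of vectors (up to 1000 items) is decoded at a time.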