Example #1
    def items(self, limit=10000, headers=None):
        """ Return all of the Items and Collections for this search """
        _limit = 500

        items = []
        found = self.found(headers=headers)
        if found > limit:
            logger.warning(
                'There are more items found (%s) than the limit (%s) provided.'
                % (found, limit))
        maxitems = min(found, limit)
        kwargs = {'page': 1, 'limit': min(_limit, maxitems)}
        kwargs.update(self.kwargs)
        url = urljoin(self.url, 'search')
        while len(items) < maxitems:
            items += [
                Item(i) for i in self.query(url=url, headers=headers, **kwargs)
                ['features']
            ]
            kwargs['page'] += 1

        # retrieve collections
        collections = []
        try:
            for c in set([
                    item._data['collection'] for item in items
                    if 'collection' in item._data
            ]):
                collections.append(self.collection(c, headers=headers))
                #del collections[c]['links']
        except Exception:
            # collection metadata is optional; ignore failures retrieving it
            pass

        return ItemCollection(items, collections=collections)
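A minimal usage sketch for the paginated items() method above, assuming a Search instance created with Search.search() as in the later examples; the bbox and datetime values below are illustrative assumptions, not taken from the snippet:

# Hypothetical usage; the search keyword values are assumptions.
search = Search.search(bbox=[-110, 39, -105, 41],
                       datetime='2020-06-01/2020-06-30')
print('%s items found' % search.found())
item_collection = search.items(limit=2000)  # pages through results 500 at a time
item_collection.save(filename='results.json')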
Example #2
    def items_by_id(cls, ids, collection):
        """ Return Items from collection with matching ids """
        col = cls.collection(collection)
        items = []
        base_url = urljoin(config.API_URL,
                           'collections/%s/items/' % collection)
        for id in ids:
            try:
                items.append(Item(cls.query(urljoin(base_url, id))))
            except SatSearchError:
                # skip ids that cannot be retrieved
                pass
        return ItemCollection(items, collections=[col])
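A possible invocation of items_by_id(), assuming it is a classmethod on the same Search class as the other examples; the item ids and collection name are hypothetical:

# Hypothetical ids and collection name; ids that fail to resolve are skipped silently.
ids = ['LC08_L1TP_034033_20200505_20200509_01_T1',
       'LC08_L1TP_034033_20200521_20200527_01_T1']
item_collection = Search.items_by_id(ids, collection='landsat-8-l1')
print('%s items returned' % len(item_collection))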
Example #3
def main(items=None,
         printmd=None,
         printcal=None,
         printassets=None,
         found=False,
         filename_template='${collection}/${date}/${id}',
         save=None,
         download=None,
         requester_pays=False,
         headers=None,
         **kwargs):
    """ Main function for performing a search """

    if items is None:
        # if there are no items then perform a search
        search = Search.search(headers=headers, **kwargs)
        if found:
            num = search.found(headers=headers)
            print('%s items found' % num)
            return num
        items = search.items(headers=headers)
    else:
        # otherwise, load previously saved items from the given file
        items = ItemCollection.open(items)

    print('%s items found' % len(items))

    # print metadata
    if printmd is not None:
        print(items.summary(printmd))

    # print calendar
    if printcal:
        print(items.calendar(printcal))

    if printassets:
        print(items.assets_definition())

    # save all metadata in JSON file
    if save is not None:
        items.save(filename=save)

    # download files given `download` keys
    if download is not None:
        if 'ALL' in download:
            # get complete set of assets
            download = set([k for i in items for k in i.assets])
        for key in download:
            items.download(key=key,
                           filename_template=filename_template,
                           requester_pays=requester_pays)

    return items
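A rough sketch of calling this entry point from Python; the search keywords, metadata fields, and asset key below are illustrative assumptions that are simply forwarded to Search.search(), items.summary(), and items.download():

# Hypothetical call; 'collections', 'datetime', and the 'thumbnail' key are assumptions.
items = main(datetime='2020-06-01/2020-06-30',
             collections=['sentinel-s2-l2a'],
             printmd=['date', 'id'],
             download=['thumbnail'],
             save='scenes.json')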
Example #4
def main(items=None, fetch=None, save=None, **kwargs):
    """ Main function for performing a search """
    _save = save if items is None else None
    items = satsearch(items, save=_save, **kwargs)
    # if not fetching, there is nothing more to do
    if fetch is None:
        return

    # check that there is a valid geometry for clipping
    feature = items._search.get('parameters', {}).get('intersects', None)
    if feature is None:
        raise Exception('No geometry provided')

    derived_items = []
    # for each date, combine scenes
    for date in items.dates():
        print('Processing files for %s' % date)
        _items = [s for s in items if s.date == date]
        # TODO - split out by user specified metadata (e.g., platform, collection)
        item = satfetch(_items, feature['geometry'], fetch)
        derived_items.append(item)

    # this needs update to sat-stac to support adding metadata to Items
    # see https://github.com/sat-utils/sat-stac/issues/39
    #props = {
    #    'software': 'sat-fetch v%s' % __version__
    #}

    col = Collection.create()
    col._data['id'] = 'sat-fetch'
    col._data['description'] = 'Fetch items created by sat-fetch'
    col._data['links'].append({
        'rel': 'about',
        'href': 'https://github.com/sat-utils/sat-fetch'
    })
    derived_items = ItemCollection(derived_items, collections=[col])
    if save is not None:
        derived_items.save(save)
    return derived_items
Example #5
    def items(self, limit=10000, page_limit=500, headers=None):
        """ Return all of the Items and Collections for this search """
        found = self.found(headers=headers)
        limit = self.limit or limit
        if found > limit:
            logger.warning(
                'There are more items found (%s) than the limit (%s) provided.'
                % (found, limit))

        nextlink = {
            'method': 'POST',
            'href': urljoin(self.url, 'search'),
            'headers': headers,
            'body': self.kwargs,
            'merge': False
        }

        items = []
        while nextlink and len(items) < limit:
            if nextlink.get('method', 'GET') == 'GET':
                resp = self.query(url=nextlink['href'],
                                  headers=headers,
                                  **self.kwargs)
            else:
                _headers = nextlink.get('headers', {})
                _body = nextlink.get('body', {})
                _body.update({'limit': page_limit})

                if nextlink.get('merge', False):
                    _headers.update(headers or {})  # headers may be None
                    _body.update(self.kwargs)
                resp = self.query(url=nextlink['href'],
                                  headers=_headers,
                                  **_body)
            items += [Item(i) for i in resp['features']]
            links = [l for l in resp['links'] if l['rel'] == 'next']
            nextlink = links[0] if len(links) == 1 else None

        # retrieve collections
        collections = []
        try:
            for c in set([
                    item._data['collection'] for item in items
                    if 'collection' in item._data
            ]):
                collections.append(self.collection(c, headers=headers))
                #del collections[c]['links']
        except Exception:
            # collection metadata is optional; ignore failures retrieving it
            pass
        logger.debug(f"Found: {len(items)}")
        return ItemCollection(items, collections=collections)
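For reference, a paging link consumed by the loop above (taken from resp['links'] where rel == 'next') has roughly this shape; the href and body values are hypothetical:

# Hypothetical 'next' link; the loop reads method, href, headers, body, and merge.
nextlink = {
    'rel': 'next',
    'method': 'POST',
    'href': 'https://stac-api.example.com/search',
    'body': {'page': 2, 'limit': 500},
    'merge': True
}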
Example #6
def main(items=None,
         printmd=None,
         printcal=False,
         found=False,
         save=None,
         download=None,
         requestor_pays=False,
         **kwargs):
    """ Main function for performing a search """

    if items is None:
        # if there are no items then perform a search
        search = Search.search(**kwargs)
        if found:
            num = search.found()
            print('%s items found' % num)
            return num
        items = search.items()
    else:
        # otherwise, load previously saved items from the given file
        items = ItemCollection.load(items)

    print('%s items found' % len(items))

    # print metadata
    if printmd is not None:
        print(items.summary(printmd))

    # print calendar
    if printcal:
        print(items.calendar())

    # save all metadata in JSON file
    if save is not None:
        items.save(filename=save)

    # download files given `download` keys
    if download is not None:
        if 'ALL' in download:
            # get complete set of assets
            download = set([k for i in items for k in i.assets])
        for key in download:
            items.download(key=key,
                           path=config.DATADIR,
                           filename=config.FILENAME,
                           requestor_pays=requestor_pays)

    return items
Example #7
    def items(self, limit=10000):
        """ Return all of the Items and Collections for this search """
        _limit = 500
        if 'ids' in self.kwargs:
            col = self.kwargs.get('query', {}).get('collection',
                                                   {}).get('eq', None)
            if col is None:
                raise SatSearchError(
                    'Collection required when searching by id')
            return self.items_by_id(self.kwargs['ids'], col)

        items = []
        found = self.found()
        if found > limit:
            logger.warning(
                'There are more items found (%s) than the limit (%s) provided.'
                % (found, limit))
        maxitems = min(found, limit)
        kwargs = {'page': 1, 'limit': min(_limit, maxitems)}
        kwargs.update(self.kwargs)
        while len(items) < maxitems:
            items += [Item(i) for i in self.query(**kwargs)['features']]
            kwargs['page'] += 1

        # retrieve collections
        collections = []
        for c in set([
                item.properties['collection'] for item in items
                if 'collection' in item.properties
        ]):
            collections.append(self.collection(c))
            #del collections[c]['links']

        # merge collections into items
        #_items = []
        #for item in items:
        #    import pdb; pdb.set_trace()
        #    if 'collection' in item['properties']:
        #        item = dict_merge(item, collections[item['properties']['collection']])
        #    _items.append(Item(item))

        search = {'endpoint': config.API_URL, 'parameters': self.kwargs}
        return ItemCollection(items, collections=collections, search=search)
Example #8
    def load_items(self):
        return ItemCollection.load(os.path.join(testpath, 'items.json'))
Example #9
    def get_items(self):
        return ItemCollection.load(op.join(testpath, 'items-landsat.geojson'))
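Both test helpers load a previously saved ItemCollection from disk; an equivalent standalone sketch with a hypothetical path:

# Hypothetical path; ItemCollection.load() is the same loader used by the helpers above.
ic = ItemCollection.load('items-landsat.geojson')
print('%s items loaded' % len(ic))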