Example #1
class SentinelDownloader(object):
    def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub'):
        self._apiname = api_url
        self._user = user
        self._password = password

        # init logger
        root = logging.getLogger()
        root.addHandler(logging.StreamHandler(
            sys.stderr
        ))
        if self._apiname == 'https://scihub.copernicus.eu/apihub':
            try:
                from sentinelsat import SentinelAPI
            except ImportError as e:
                gs.fatal(_("Module requires sentinelsat library: {}").format(e))
            # connect SciHub via API
            self._api = SentinelAPI(self._user, self._password,
                                    api_url=self._apiname
                                    )
        elif self._apiname == 'USGS_EE':
            try:
                import landsatxplore.api
                from landsatxplore.errors import EarthExplorerError
            except ImportError as e:
                gs.fatal(_("Module requires landsatxplore library: {}").format(e))
            api_login = False
            while api_login is False:
                # avoid login conflict in possible parallel execution
                try:
                    self._api = landsatxplore.api.API(self._user,
                                                      self._password)
                    api_login = True
                except EarthExplorerError as e:
                    time.sleep(1)
        self._products_df_sorted = None

    def filter(self, area, area_relation,
               clouds=None, producttype=None, limit=None, query=None,
               start=None, end=None, sortby=None, asc=True,
               relativeorbitnumber=None):
        args = {}
        if clouds:
            args['cloudcoverpercentage'] = (0, int(clouds))
        if relativeorbitnumber:
            args['relativeorbitnumber'] = relativeorbitnumber
            if producttype and producttype.startswith('S2') \
                    and int(relativeorbitnumber) > 143:
                gs.warning(_("This relative orbit number is out of range"))
            elif int(relativeorbitnumber) > 175:
                gs.warning(_("This relative orbit number is out of range"))
        if producttype:
            args['producttype'] = producttype
            if producttype.startswith('S2'):
                args['platformname'] = 'Sentinel-2'
            else:
                args['platformname'] = 'Sentinel-1'
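        # start/end may be relative SciHub expressions (e.g. NOW-60DAYS) or
        # YYYY-MM-DD dates, which are normalized to YYYYMMDD below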
        if not start:
            start = 'NOW-60DAYS'
        else:
            start = start.replace('-', '')
        if not end:
            end = 'NOW'
        else:
            end = end.replace('-', '')
        if query:
            redefined = [value for value in args.keys() if value in query.keys()]
            if redefined:
                gs.warning(_("Query overrides already defined options ({})").format(
                    ','.join(redefined)
                ))
            args.update(query)
        gs.verbose(_("Query: area={} area_relation={} date=({}, {}) args={}").format(
            area, area_relation, start, end, args
        ))
        products = self._api.query(
            area=area, area_relation=area_relation,
            date=(start, end),
            **args
        )
        products_df = self._api.to_dataframe(products)
        if len(products_df) < 1:
            gs.message(_("No product found"))
            return

        # sort and limit to first sorted product
        if sortby:
            self._products_df_sorted = products_df.sort_values(
                sortby,
                ascending=[asc] * len(sortby)
            )
        else:
            self._products_df_sorted = products_df

        if limit:
            self._products_df_sorted = self._products_df_sorted.head(int(limit))

        gs.message(_("{} Sentinel product(s) found").format(len(self._products_df_sorted)))

    def list(self):
        if self._products_df_sorted is None:
            return
        id_kw = ('uuid', 'entity_id')
        identifier_kw = ('identifier', 'display_id')
        cloud_kw = ('cloudcoverpercentage', 'cloud_cover')
        time_kw = ('beginposition', 'acquisition_date')
        kw_idx = 1 if self._apiname == 'USGS_EE' else 0
        for idx in range(len(self._products_df_sorted[id_kw[kw_idx]])):
            if cloud_kw[kw_idx] in self._products_df_sorted:
                ccp = '{0:2.0f}%'.format(
                    float(self._products_df_sorted[cloud_kw[kw_idx]][idx]))
            else:
                ccp = 'cloudcover_NA'

            print_str = '{0} {1}'.format(
                self._products_df_sorted[id_kw[kw_idx]][idx],
                self._products_df_sorted[identifier_kw[kw_idx]][idx])
            if kw_idx == 1:
                time_string = self._products_df_sorted[time_kw[kw_idx]][idx]
            else:
                time_string = self._products_df_sorted[
                    time_kw[kw_idx]][idx].strftime("%Y-%m-%dT%H:%M:%SZ")
            print_str += ' {0} {1}'.format(time_string, ccp)
            if kw_idx == 0:
                print_str += ' {0}'.format(
                    self._products_df_sorted['producttype'][idx])

            print(print_str)

    def download(self, output, sleep=False, maxretry=False,
                 datasource='ESA_COAH'):
        if self._products_df_sorted is None:
            return

        create_dir(output)
        gs.message(_("Downloading data into <{}>...").format(output))
        if datasource == 'USGS_EE':
            from landsatxplore.earthexplorer import EarthExplorer
            from landsatxplore.errors import EarthExplorerError
            from zipfile import ZipFile
            ee_login = False
            while ee_login is False:
                # avoid login conflict in possible parallel execution
                try:
                    ee = EarthExplorer(self._user, self._password)
                    ee_login = True
                except EarthExplorerError as e:
                    time.sleep(1)
            for idx in range(len(self._products_df_sorted['entity_id'])):
                scene = self._products_df_sorted['entity_id'][idx]
                identifier = self._products_df_sorted['display_id'][idx]
                zip_file = os.path.join(output, '{}.zip'.format(identifier))
                gs.message(_("Downloading {}...").format(identifier))
                try:
                    ee.download(identifier=identifier, output_dir=output, timeout=600)
                except EarthExplorerError as e:
                    gs.fatal(_(e))
                # extract .zip to get "usual" .SAFE
                with ZipFile(zip_file, 'r') as zip:
                    safe_name = zip.namelist()[0].split('/')[0]
                    outpath = os.path.join(output, safe_name)
                    zip.extractall(path=output)
                gs.message(_("Downloaded to <{}>").format(outpath))
                try:
                    os.remove(zip_file)
                except Exception as e:
                    gs.warning(_("Unable to remove {0}:{1}").format(
                        zip_file, e))
            # log out only after all requested scenes have been downloaded
            ee.logout()

        elif datasource == "ESA_COAH":
            for idx in range(len(self._products_df_sorted['uuid'])):
                gs.message('{} -> {}.SAFE'.format(
                    self._products_df_sorted['uuid'][idx],
                    os.path.join(output, self._products_df_sorted['identifier'][idx])
                ))
                # download
                out = self._api.download(self._products_df_sorted['uuid'][idx],
                                         output)
                if sleep:
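                    # out["Online"] is False while the product sits in the
                    # Long Term Archive; each download call re-requests it,
                    # so retry until it is online or maxretry is reached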
                    x = 1
                    online = out['Online']
                    while not online:
                        # sleep is in minutes so multiply by 60
                        time.sleep(int(sleep) * 60)
                        out = self._api.download(self._products_df_sorted['uuid'][idx],
                                                 output)
                        x += 1
                        if x > maxretry:
                            online = True
        elif datasource == 'GCS':
            for scene_id in self._products_df_sorted['identifier']:
                gs.message(_("Downloading {}...").format(scene_id))
                dl_code = download_gcs(scene_id, output)
                if dl_code == 0:
                    gs.message(_("Downloaded to {}").format(
                        os.path.join(output, '{}.SAFE'.format(scene_id))))
                else:
                    # remove incomplete file
                    del_folder = os.path.join(output,
                                              '{}.SAFE'.format(scene_id))
                    try:
                        shutil.rmtree(del_folder)
                    except Exception as e:
                        gs.warning(_("Unable to removed unfinished "
                                     "download {}".format(del_folder)))

    def save_footprints(self, map_name):
        if self._products_df_sorted is None:
            return
        if self._apiname == 'USGS_EE':
            gs.fatal(_(
                "USGS Earth Explorer does not support footprint download."))
        try:
            from osgeo import ogr, osr
        except ImportError as e:
            gs.fatal(_("Option <footprints> requires GDAL library: {}").format(e))

        gs.message(_("Writing footprints into <{}>...").format(map_name))
        driver = ogr.GetDriverByName("GPKG")
        tmp_name = gs.tempfile() + '.gpkg'
        data_source = driver.CreateDataSource(tmp_name)

        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)

        # features can be polygons or multi-polygons
        layer = data_source.CreateLayer(str(map_name), srs, ogr.wkbMultiPolygon)

        # attributes
        attrs = OrderedDict([
            ("uuid", ogr.OFTString),
            ("ingestiondate", ogr.OFTString),
            ("cloudcoverpercentage", ogr.OFTInteger),
            ("producttype", ogr.OFTString),
            ("identifier", ogr.OFTString)
        ])

        # Sentinel-1 data does not have cloudcoverpercentage
        prod_types = list(self._products_df_sorted["producttype"])
        s1_types = ["SLC", "GRD"]
        if any(ptype in prod_types for ptype in s1_types):
            del attrs["cloudcoverpercentage"]

        for key in attrs.keys():
            field = ogr.FieldDefn(key, attrs[key])
            layer.CreateField(field)

        # features
        for idx in range(len(self._products_df_sorted['uuid'])):
            wkt = self._products_df_sorted['footprint'][idx]
            feature = ogr.Feature(layer.GetLayerDefn())
            newgeom = ogr.CreateGeometryFromWkt(wkt)
            # convert polygons to multi-polygons
            newgeomtype = ogr.GT_Flatten(newgeom.GetGeometryType())
            if newgeomtype == ogr.wkbPolygon:
                multigeom = ogr.Geometry(ogr.wkbMultiPolygon)
                multigeom.AddGeometryDirectly(newgeom)
                feature.SetGeometry(multigeom)
            else:
                feature.SetGeometry(newgeom)
            for key in attrs.keys():
                if key == 'ingestiondate':
                    value = self._products_df_sorted[key][idx].strftime("%Y-%m-%dT%H:%M:%SZ")
                else:
                    value = self._products_df_sorted[key][idx]
                feature.SetField(key, value)
            layer.CreateFeature(feature)
            feature = None

        data_source = None

        # footprint coordinates are parsed from WKT -> floating point
        # precision issues -> snap on import
        gs.run_command('v.import', input=tmp_name, output=map_name,
                       layer=map_name, snap=1e-10, quiet=True
                       )

    def get_products_from_uuid_usgs(self, uuid_list):
        scenes = []
        for uuid in uuid_list:
            metadata = self._api.metadata(uuid, 'SENTINEL_2A')
            scenes.append(metadata)
        scenes_df = pandas.DataFrame.from_dict(scenes)
        self._products_df_sorted = scenes_df
        gs.message(_("{} Sentinel product(s) found").format(
            len(self._products_df_sorted)))

    def set_uuid(self, uuid_list):
        """Set products by uuid.

        TODO: Find better implementation

        :param uuid_list: list of product uuids to set
        """
        if self._apiname == 'USGS_EE':
            self.get_products_from_uuid_usgs(uuid_list)
        else:
            from sentinelsat.sentinel import SentinelAPIError

            self._products_df_sorted = {'uuid': []}
            for uuid in uuid_list:
                try:
                    odata = self._api.get_product_odata(uuid, full=True)
                except SentinelAPIError as e:
                    gs.error(_("{0}. UUID {1} skipped".format(e, uuid)))
                    continue

                for k, v in odata.items():
                    if k == 'id':
                        k = 'uuid'
                    elif k == 'Sensing start':
                        k = 'beginposition'
                    elif k == 'Product type':
                        k = 'producttype'
                    elif k == 'Cloud cover percentage':
                        k = 'cloudcoverpercentage'
                    elif k == 'Identifier':
                        k = 'identifier'
                    elif k == 'Ingestion Date':
                        k = 'ingestiondate'
                    elif k == 'footprint':
                        pass
                    else:
                        continue
                    if k not in self._products_df_sorted:
                        self._products_df_sorted[k] = []
                    self._products_df_sorted[k].append(v)

    def filter_USGS(self, area, area_relation, clouds=None, producttype=None,
                    limit=None, query=None, start=None, end=None, sortby=None,
                    asc=True, relativeorbitnumber=None):
        if area_relation != 'Intersects':
            gs.fatal(_(
                "USGS Earth Explorer only supports area_relation"
                " 'Intersects'"))
        if relativeorbitnumber:
            gs.fatal(_(
                "USGS Earth Explorer does not support 'relativeorbitnumber'"
                " option."))
        if producttype and producttype != 'S2MSI1C':
            gs.fatal(_(
                "USGS Earth Explorer only supports producttype S2MSI1C"))
        if query:
            if not any(key in query for key in ['identifier', 'filename',
                                                'usgs_identifier']):
                gs.fatal(_(
                    "USGS Earth Explorer only supports query options"
                    " 'filename', 'identifier' or 'usgs_identifier'."))
            if 'usgs_identifier' in query:
                # get entityId from usgs identifier and directly save results
                usgs_id = query['usgs_identifier']
                check_s2l1c_identifier(usgs_id, source='usgs')
                # entity_id = self._api.lookup('SENTINEL_2A', [usgs_id],
                #                              inverse=True)
                entity_id = self._api.get_entity_id([usgs_id], 'SENTINEL_2A')
                self.get_products_from_uuid_usgs(entity_id)
                return
            else:
                if "filename" in query:
                    esa_id = query['filename'].replace('.SAFE', '')
                else:
                    esa_id = query['identifier']
                check_s2l1c_identifier(esa_id, source='esa')
                esa_prod_id = esa_id.split('_')[-1]
                utm_tile = esa_id.split('_')[-2]
                acq_date = esa_id.split('_')[2].split('T')[0]
                acq_date_string = '{0}-{1}-{2}'.format(
                    acq_date[:4], acq_date[4:6], acq_date[6:])
                start_date = end_date = acq_date_string
                # derive a bbox from the UTM tile of the ESA identifier
                if utm_tile.startswith('T'):
                    utm_tile_base = utm_tile[1:]
                else:
                    utm_tile_base = utm_tile
                bbox = get_bbox_from_S2_UTMtile(utm_tile_base)
        else:
            # get coordinate pairs from wkt string
            str_1 = 'POLYGON(('
            str_2 = '))'
            coords = area[area.find(str_1)+len(str_1):area.rfind(str_2)].split(',')
            # add one space to first pair for consistency
            coords[0] = ' ' + coords[0]
            lons = [float(pair.split(' ')[1]) for pair in coords]
            lats = [float(pair.split(' ')[2]) for pair in coords]
            bbox = (min(lons), min(lats), max(lons), max(lats))
            start_date = start
            end_date = end
        usgs_args = {
            'dataset': 'SENTINEL_2A',
            'bbox': bbox,
            'start_date': start_date,
            'end_date': end_date
        }
        if clouds:
            usgs_args['max_cloud_cover'] = clouds
        if limit:
            usgs_args['max_results'] = limit
        scenes = self._api.search(**usgs_args)
        self._api.logout()
        if query:
            # keep only scenes with the correct UTM tile; filter into a new
            # list instead of removing while iterating, which skips elements
            scenes = [scene for scene in scenes
                      if scene['display_id'].split('_')[1] == utm_tile]
            # remove redundant scene
            if len(scenes) == 2:
                scenes = [scene for scene in scenes
                          if scene['display_id'].split('_')[-1] == esa_prod_id]
        if len(scenes) < 1:
            gs.message(_("No product found"))
            return
        scenes_df = pandas.DataFrame.from_dict(scenes)
        if sortby:
            # replace sortby keywords with USGS keywords
            for idx, keyword in enumerate(sortby):
                if keyword == 'cloudcoverpercentage':
                    sortby[idx] = 'cloud_cover'
                    # turn cloudcover to float to make it sortable
                    scenes_df['cloud_cover'] = pandas.to_numeric(
                        scenes_df['cloud_cover'])
                elif keyword == 'ingestiondate':
                    sortby[idx] = 'acquisition_date'
                # sorting by footprint is not meaningful; sort by display_id
                # instead
                elif keyword == 'footprint':
                    sortby[idx] = 'display_id'
            self._products_df_sorted = scenes_df.sort_values(
                sortby,
                ascending=[asc] * len(sortby), ignore_index=True
            )
        else:
            self._products_df_sorted = scenes_df
        gs.message(_("{} Sentinel product(s) found").format(
            len(self._products_df_sorted)))
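
# A minimal usage sketch for the class above, assuming the module-level
# imports this excerpt omits (gs, pandas, logging, sys, os, time, shutil)
# and valid SciHub credentials; all values below are placeholders.
if __name__ == "__main__":
    downloader = SentinelDownloader(user="myuser", password="mypassword")
    # search for Sentinel-2 L1C scenes intersecting an area of interest
    downloader.filter(
        area="POLYGON((30.31 59.91,30.32 59.91,30.32 59.92,"
             "30.31 59.92,30.31 59.91))",
        area_relation="Intersects",
        producttype="S2MSI1C",
        clouds=30,
        sortby=["cloudcoverpercentage"],
        limit=5,
    )
    downloader.list()
    downloader.download(output="/tmp/sentinel", datasource="ESA_COAH")
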
Example #2
class SentinelDownloader(object):
    def __init__(
        self,
        user,
        password,
        api_url="https://apihub.copernicus.eu/apihub",
        cred_req=True,
    ):
        self._apiname = api_url
        self._user = user
        self._password = password
        self._cred_req = cred_req
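        # when cred_req is False, filter() skips the catalog query and only
        # records the requested identifier, so no credentials are required
        # (used for downloads from GCS)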

        # init logger
        root = logging.getLogger()
        root.addHandler(logging.StreamHandler(sys.stderr))
        if self._apiname not in ["USGS_EE", "GCS"]:
            try:
                from sentinelsat import SentinelAPI
            except ImportError as e:
                gs.fatal(
                    _("Module requires sentinelsat library: {}").format(e))
            # connect SciHub via API
            self._api = SentinelAPI(self._user,
                                    self._password,
                                    api_url=self._apiname)
        elif self._apiname == "USGS_EE":
            api_login = False
            while api_login is False:
                # avoid login conflict in possible parallel execution
                try:
                    self._api = landsatxplore.api.API(self._user,
                                                      self._password)
                    api_login = True
                except EarthExplorerError as e:
                    time.sleep(1)
        self._products_df_sorted = None

    def filter(
        self,
        area,
        area_relation,
        clouds=None,
        producttype=None,
        limit=None,
        query=None,
        start=None,
        end=None,
        sortby=None,
        asc=True,
        relativeorbitnumber=None,
    ):
        # Dict to identify platforms from the requested product type
        platforms = {
            "SL": "Sentinel-1",
            "GR": "Sentinel-1",
            "OC": "Sentinel-1",
            "S2": "Sentinel-2",
            "S3": "Sentinel-3",
        }
        args = {}
        if clouds:
            args["cloudcoverpercentage"] = (0, int(clouds))
        if relativeorbitnumber:
            args["relativeorbitnumber"] = relativeorbitnumber
            if producttype.startswith("S2") and int(relativeorbitnumber) > 143:
                gs.warning("This relative orbit number is out of range")
            elif int(relativeorbitnumber) > 175:
                gs.warning(_("This relative orbit number is out of range"))
        if producttype:
            if producttype.startswith("S3"):
                # Using custom product names for Sentinel-3 products that look less cryptic
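                # e.g. "S3OL1EFR" is rewritten to the ESA name "OL_1_EFR___"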
                split = [0, 2, 4, 5, 8]
                args["producttype"] = "_".join([
                    producttype[i:j] for i, j in zip(split, split[1:] + [None])
                ][1:]).ljust(11, "_")
            else:
                args["producttype"] = producttype
            args["platformname"] = platforms[producttype[0:2]]
        if not start:
            start = "NOW-60DAYS"
        else:
            start = start.replace("-", "")
        if not end:
            end = "NOW"
        else:
            end = end.replace("-", "")
        if query:
            redefined = [
                value for value in args.keys() if value in query.keys()
            ]
            if redefined:
                gs.warning(
                    _("Query overrides already defined options ({})").format(
                        ",".join(redefined)))
            args.update(query)
        gs.verbose(
            _("Query: area={} area_relation={} date=({}, {}) args={}").format(
                area, area_relation, start, end, args))
        if self._cred_req is False:
            # in the main function it is ensured that there is an "identifier" query
            self._products_df_sorted = pandas.DataFrame(
                {"identifier": [query["identifier"]]})
            return

        products = self._api.query(area=area,
                                   area_relation=area_relation,
                                   date=(start, end),
                                   **args)
        products_df = self._api.to_dataframe(products)
        if len(products_df) < 1:
            gs.message(_("No product found"))
            return

        # sort and limit to first sorted product
        if sortby:
            self._products_df_sorted = products_df.sort_values(
                sortby, ascending=[asc] * len(sortby))
        else:
            self._products_df_sorted = products_df

        if limit:
            self._products_df_sorted = self._products_df_sorted.head(
                int(limit))

        gs.message(
            _("{} Sentinel product(s) found").format(
                len(self._products_df_sorted)))

    def list(self):
        if self._products_df_sorted is None:
            return
        id_kw = ("uuid", "entity_id")
        identifier_kw = ("identifier", "display_id")
        cloud_kw = ("cloudcoverpercentage", "cloud_cover")
        time_kw = ("beginposition", "acquisition_date")
        kw_idx = 1 if self._apiname == "USGS_EE" else 0
        for idx in range(len(self._products_df_sorted[id_kw[kw_idx]])):
            if cloud_kw[kw_idx] in self._products_df_sorted:
                ccp = "{0:2.0f}%".format(
                    float(self._products_df_sorted[cloud_kw[kw_idx]][idx]))
            else:
                ccp = "cloudcover_NA"

            print_str = "{0} {1}".format(
                self._products_df_sorted[id_kw[kw_idx]][idx],
                self._products_df_sorted[identifier_kw[kw_idx]][idx],
            )
            if kw_idx == 1:
                time_string = self._products_df_sorted[time_kw[kw_idx]][idx]
            else:
                time_string = self._products_df_sorted[
                    time_kw[kw_idx]][idx].strftime("%Y-%m-%dT%H:%M:%SZ")
            print_str += " {0} {1}".format(time_string, ccp)
            if kw_idx == 0:
                print_str += " {0}".format(
                    self._products_df_sorted["producttype"][idx])
                print_str += " {0}".format(
                    self._products_df_sorted["size"][idx])

            print(print_str)

    def skip_existing(self, output, pattern_file):
        # note: pattern_file is currently unused
        prod_df_type = type(self._products_df_sorted)
        # Check if skipping is possible/required
        if not os.path.exists(output):
            return
        if prod_df_type != dict:
            if self._products_df_sorted.empty:
                return
        elif not self._products_df_sorted:
            return
        # Check if ingestion date is returned by API
        if "ingestiondate" not in self._products_df_sorted:
            gs.warning(
                _("Ingestiondate not returned. Cannot filter previously downloaded scenes"
                  ))
            return
        # Check for previously downloaded scenes
        existing_files = [
            f for f in os.listdir(output)
            if re.search(r"\.(zip|safe)$", f, re.IGNORECASE)
        ]
        if not existing_files:
            return
        # Filter by ingestion date
        skiprows = []
        for idx, display_id in enumerate(
                self._products_df_sorted["identifier"]):
            existing_file = [
                sfile for sfile in existing_files if display_id in sfile
            ]
            if existing_file:
                creation_time = datetime.fromtimestamp(
                    os.path.getctime(os.path.join(output, existing_file[0])))
                if self._products_df_sorted["ingestiondate"][
                        idx] <= creation_time:
                    gs.message(
                        _("Skipping scene: {} which is already downloaded.").
                        format(self._products_df_sorted["identifier"][idx]))
                    skiprows.append(display_id)
        if prod_df_type == dict:
            for scene in skiprows:
                idx = self._products_df_sorted["identifier"].index(scene)
                for key in self._products_df_sorted:
                    self._products_df_sorted[key].pop(idx)
        else:
            self._products_df_sorted = self._products_df_sorted[
                ~self._products_df_sorted["identifier"].isin(skiprows)]

    def download(self,
                 output,
                 sleep=False,
                 maxretry=False,
                 datasource="ESA_COAH"):
        if self._products_df_sorted is None:
            return

        create_dir(output)
        gs.message(_("Downloading data into <{}>...").format(output))
        if datasource == "USGS_EE":
            from landsatxplore.earthexplorer import EarthExplorer
            from landsatxplore.errors import EarthExplorerError
            from zipfile import ZipFile

            ee_login = False
            while ee_login is False:
                # avoid login conflict in possible parallel execution
                try:
                    ee = EarthExplorer(self._user, self._password)
                    ee_login = True
                except EarthExplorerError as e:
                    time.sleep(1)
            for idx in range(len(self._products_df_sorted["entity_id"])):
                scene = self._products_df_sorted["entity_id"][idx]
                identifier = self._products_df_sorted["display_id"][idx]
                zip_file = os.path.join(output, "{}.zip".format(identifier))
                gs.message(_("Downloading {}...").format(identifier))
                try:
                    ee.download(identifier=identifier,
                                output_dir=output,
                                timeout=600)
                except EarthExplorerError as e:
                    gs.fatal(_(e))
                # extract .zip to get "usual" .SAFE
                with ZipFile(zip_file, "r") as zip:
                    safe_name = zip.namelist()[0].split("/")[0]
                    outpath = os.path.join(output, safe_name)
                    zip.extractall(path=output)
                gs.message(_("Downloaded to <{}>").format(outpath))
                try:
                    os.remove(zip_file)
                except Exception as e:
                    gs.warning(
                        _("Unable to remove {0}:{1}").format(zip_file, e))
            # log out only after all requested scenes have been downloaded
            ee.logout()

        elif datasource == "ESA_COAH":
            for idx in range(len(self._products_df_sorted["uuid"])):
                gs.message("{} -> {}.SAFE".format(
                    self._products_df_sorted["uuid"][idx],
                    os.path.join(output,
                                 self._products_df_sorted["identifier"][idx]),
                ))
                # download
                out = self._api.download(self._products_df_sorted["uuid"][idx],
                                         output)
                if sleep:
                    x = 1
                    online = out["Online"]
                    while not online:
                        # sleep is in minutes so multiply by 60
                        time.sleep(int(sleep) * 60)
                        out = self._api.download(
                            self._products_df_sorted["uuid"][idx], output)
                        x += 1
                        if x > maxretry:
                            online = True
        elif datasource == "GCS":
            for scene_id in self._products_df_sorted["identifier"]:
                gs.message(_("Downloading {}...").format(scene_id))
                dl_code = download_gcs(scene_id, output)
                if dl_code == 0:
                    gs.message(
                        _("Downloaded to {}").format(
                            os.path.join(output, "{}.SAFE".format(scene_id))))
                else:
                    # remove incomplete file
                    del_folder = os.path.join(output,
                                              "{}.SAFE".format(scene_id))
                    try:
                        shutil.rmtree(del_folder)
                    except Exception as e:
                        gs.warning(
                            _("Unable to remove unfinished "
                              "download {}").format(del_folder))

    def save_footprints(self, map_name):
        if self._products_df_sorted is None:
            return
        if self._apiname == "USGS_EE":
            gs.fatal(
                _("USGS Earth Explorer does not support footprint download."))
        try:
            from osgeo import ogr, osr
        except ImportError as e:
            gs.fatal(
                _("Option <footprints> requires GDAL library: {}").format(e))

        gs.message(_("Writing footprints into <{}>...").format(map_name))
        driver = ogr.GetDriverByName("GPKG")
        tmp_name = gs.tempfile() + ".gpkg"
        data_source = driver.CreateDataSource(tmp_name)

        srs = osr.SpatialReference()
        srs.ImportFromEPSG(4326)

        # features can be polygons or multi-polygons
        layer = data_source.CreateLayer(str(map_name), srs,
                                        ogr.wkbMultiPolygon)

        # attributes
        attrs = OrderedDict([
            ("uuid", ogr.OFTString),
            ("ingestiondate", ogr.OFTString),
            ("cloudcoverpercentage", ogr.OFTInteger),
            ("producttype", ogr.OFTString),
            ("identifier", ogr.OFTString),
        ])

        # cloudcoverpercentage is only defined for some product types
        # (e.g. Sentinel-1 data does not have it)
        prod_types = list(self._products_df_sorted["producttype"])
        if not any(ptype in prod_types for ptype in cloudcover_products):
            del attrs["cloudcoverpercentage"]

        for key in attrs.keys():
            field = ogr.FieldDefn(key, attrs[key])
            layer.CreateField(field)

        # features
        for idx in range(len(self._products_df_sorted["uuid"])):
            wkt = self._products_df_sorted["footprint"][idx]
            feature = ogr.Feature(layer.GetLayerDefn())
            newgeom = ogr.CreateGeometryFromWkt(wkt)
            # convert polygons to multi-polygons
            newgeomtype = ogr.GT_Flatten(newgeom.GetGeometryType())
            if newgeomtype == ogr.wkbPolygon:
                multigeom = ogr.Geometry(ogr.wkbMultiPolygon)
                multigeom.AddGeometryDirectly(newgeom)
                feature.SetGeometry(multigeom)
            else:
                feature.SetGeometry(newgeom)
            for key in attrs.keys():
                if key == "ingestiondate":
                    value = self._products_df_sorted[key][idx].strftime(
                        "%Y-%m-%dT%H:%M:%SZ")
                else:
                    value = self._products_df_sorted[key][idx]
                feature.SetField(key, value)
            layer.CreateFeature(feature)
            feature = None

        data_source = None

        # footprint coordinates are parsed from WKT -> floating point
        # precision issues -> snap on import
        gs.run_command(
            "v.import",
            input=tmp_name,
            output=map_name,
            layer=map_name,
            snap=1e-10,
            quiet=True,
        )

    def get_products_from_uuid_usgs(self, uuid_list):
        scenes = []
        for uuid in uuid_list:
            metadata = self._api.metadata(uuid, "SENTINEL_2A")
            scenes.append(metadata)
        scenes_df = pandas.DataFrame.from_dict(scenes)
        self._products_df_sorted = scenes_df
        gs.message(
            _("{} Sentinel product(s) found").format(
                len(self._products_df_sorted)))

    def set_uuid(self, uuid_list):
        """Set products by uuid.

        TODO: Find better implementation

        :param uuid_list: list of product uuids to set
        """
        if self._apiname == "USGS_EE":
            self.get_products_from_uuid_usgs(uuid_list)
        else:
            from sentinelsat.sentinel import SentinelAPIError

            self._products_df_sorted = {"uuid": []}
            for uuid in uuid_list:
                try:
                    odata = self._api.get_product_odata(uuid, full=True)
                except SentinelAPIError as e:
                    gs.error(_("{0}. UUID {1} skipped".format(e, uuid)))
                    continue

                for k, v in odata.items():
                    if k == "id":
                        k = "uuid"
                    elif k == "Sensing start":
                        k = "beginposition"
                    elif k == "Product type":
                        k = "producttype"
                    elif k == "Cloud cover percentage":
                        k = "cloudcoverpercentage"
                    elif k == "Identifier":
                        k = "identifier"
                    elif k == "Ingestion Date":
                        k = "ingestiondate"
                    elif k == "footprint":
                        pass
                    else:
                        continue
                    if k not in self._products_df_sorted:
                        self._products_df_sorted[k] = []
                    self._products_df_sorted[k].append(v)

    def filter_USGS(
        self,
        area,
        area_relation,
        clouds=None,
        producttype=None,
        limit=None,
        query=None,
        start=None,
        end=None,
        sortby=None,
        asc=True,
        relativeorbitnumber=None,
    ):
        if area_relation != "Intersects":
            gs.fatal(
                _("USGS Earth Explorer only supports area_relation"
                  " 'Intersects'"))
        if relativeorbitnumber:
            gs.fatal(
                _("USGS Earth Explorer does not support 'relativeorbitnumber'"
                  " option."))
        if producttype and producttype != "S2MSI1C":
            gs.fatal(
                _("USGS Earth Explorer only supports producttype S2MSI1C"))
        if query:
            if not any(
                    key in query
                    for key in ["identifier", "filename", "usgs_identifier"]):
                gs.fatal(
                    _("USGS Earth Explorer only supports query options"
                      " 'filename', 'identifier' or 'usgs_identifier'."))
            if "usgs_identifier" in query:
                # get entityId from usgs identifier and directly save results
                usgs_id = query["usgs_identifier"]
                check_s2l1c_identifier(usgs_id, source="usgs")
                # entity_id = self._api.lookup('SENTINEL_2A', [usgs_id],
                #                              inverse=True)
                entity_id = self._api.get_entity_id([usgs_id], "SENTINEL_2A")
                self.get_products_from_uuid_usgs(entity_id)
                return
            else:
                if "filename" in query:
                    esa_id = query["filename"].replace(".SAFE", "")
                else:
                    esa_id = query["identifier"]
                check_s2l1c_identifier(esa_id, source="esa")
                esa_prod_id = esa_id.split("_")[-1]
                utm_tile = esa_id.split("_")[-2]
                acq_date = esa_id.split("_")[2].split("T")[0]
                acq_date_string = "{0}-{1}-{2}".format(acq_date[:4],
                                                       acq_date[4:6],
                                                       acq_date[6:])
                start_date = end_date = acq_date_string
                # derive a bbox from the UTM tile of the ESA identifier
                if utm_tile.startswith("T"):
                    utm_tile_base = utm_tile[1:]
                else:
                    utm_tile_base = utm_tile
                bbox = get_bbox_from_S2_UTMtile(utm_tile_base)
        else:
            # get coordinate pairs from wkt string
            str_1 = "POLYGON(("
            str_2 = "))"
            coords = area[area.find(str_1) +
                          len(str_1):area.rfind(str_2)].split(",")
            # add one space to first pair for consistency
            coords[0] = " " + coords[0]
            lons = [float(pair.split(" ")[1]) for pair in coords]
            lats = [float(pair.split(" ")[2]) for pair in coords]
            bbox = (min(lons), min(lats), max(lons), max(lats))
            start_date = start
            end_date = end
        usgs_args = {
            "dataset": "SENTINEL_2A",
            "bbox": bbox,
            "start_date": start_date,
            "end_date": end_date,
        }
        if clouds:
            usgs_args["max_cloud_cover"] = clouds
        if limit:
            usgs_args["max_results"] = limit
        scenes = self._api.search(**usgs_args)
        self._api.logout()
        if query:
            # keep only scenes with the correct UTM tile; filter into a new
            # list instead of removing while iterating, which skips elements
            scenes = [scene for scene in scenes
                      if scene["display_id"].split("_")[1] == utm_tile]
            # remove redundant scene
            if len(scenes) == 2:
                scenes = [scene for scene in scenes
                          if scene["display_id"].split("_")[-1] == esa_prod_id]
        if len(scenes) < 1:
            gs.message(_("No product found"))
            return
        scenes_df = pandas.DataFrame.from_dict(scenes)
        if sortby:
            # replace sortby keywords with USGS keywords
            for idx, keyword in enumerate(sortby):
                if keyword == "cloudcoverpercentage":
                    sortby[idx] = "cloud_cover"
                    # turn cloudcover to float to make it sortable
                    scenes_df["cloud_cover"] = pandas.to_numeric(
                        scenes_df["cloud_cover"])
                elif keyword == "ingestiondate":
                    sortby[idx] = "acquisition_date"
                # sorting by footprint is not meaningful; sort by display_id
                # instead
                elif keyword == "footprint":
                    sortby[idx] = "display_id"
            self._products_df_sorted = scenes_df.sort_values(sortby,
                                                             ascending=[asc] *
                                                             len(sortby),
                                                             ignore_index=True)
        else:
            self._products_df_sorted = scenes_df
        gs.message(
            _("{} Sentinel product(s) found").format(
                len(self._products_df_sorted)))
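
# A minimal usage sketch for this variant, assuming the module-level imports
# and helpers this excerpt omits (gs, pandas, download_gcs, create_dir).
# With api_url="GCS" and cred_req=False, filter() skips the catalog query and
# records only the given identifier, so no login is needed; the identifier
# below is a placeholder.
if __name__ == "__main__":
    downloader = SentinelDownloader(
        user="", password="", api_url="GCS", cred_req=False)
    downloader.filter(
        area=None,
        area_relation=None,
        query={"identifier": "S2A_MSIL1C_20200101T100319_placeholder"},
    )
    downloader.download(output="/tmp/sentinel", datasource="GCS")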