def download(self, download_dir, mirror=None):

    if not mirror:
        logger.info('One or more of your scenes need to be downloaded.')
        print(' Select the server from where you want to download:')
        print(' (1) Copernicus Apihub (ESA, rolling archive)')
        print(' (2) Alaska Satellite Facility (NASA, full archive)')
        print(' (3) PEPS (CNES, 1 year rolling archive)')
        print(' (4) ONDA DIAS (ONDA DIAS full archive for'
              ' SLC - or GRD from 30 June 2019)')
        # print(' (5) Alaska Satellite Facility (using WGET'
        #       ' - unstable - use only if 2 fails)')
        mirror = input(' Type 1, 2, 3, or 4: ')

    from ost.s1 import download

    if isinstance(download_dir, str):
        download_dir = Path(download_dir)

    if mirror == '1':
        uname, pword = scihub.ask_credentials()
        opener = scihub.connect(uname=uname, pword=pword)
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.scihub_uuid(opener)]
        })
    elif mirror == '2':
        uname, pword = asf.ask_credentials()
        df = pd.DataFrame({'identifier': [self.scene_id]})
    elif mirror == '3':
        uname, pword = peps.ask_credentials()
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.peps_uuid(uname=uname, pword=pword)]
        })
    elif mirror == '4':
        uname, pword = onda.ask_credentials()
        opener = onda.connect(uname=uname, pword=pword)
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.ondadias_uuid(opener)]
        })
    else:
        raise ValueError('You entered the wrong mirror.')

    # else:  # ASF
    #     df = pd.DataFrame({'identifier': [self.scene_id]})
    #     download.download_sentinel1(df, download_dir, mirror)
    #     return

    download.download_sentinel1(
        df, download_dir, mirror, uname=uname, pword=pword
    )

    # delete credentials
    del uname, pword
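# Usage sketch (not part of the original module): a minimal way to call the
# download() method above on a single scene. The `from ost import Sentinel1Scene`
# import is taken from the surrounding code, and the scene identifier below is a
# placeholder; both may need adapting to your OST installation.
if __name__ == '__main__':
    from pathlib import Path
    from ost import Sentinel1Scene

    scene = Sentinel1Scene('S1A_IW_GRDH_1SDV_...')  # replace with a real scene id

    # mirror='2' selects the Alaska Satellite Facility; with mirror=None the
    # method prompts interactively for one of the supported mirrors.
    scene.download(download_dir=Path.home() / 'ost_downloads', mirror='2')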
def batch_download(inventory_df, download_dir, uname, pword, concurrent=2):

    import os
    import glob
    import multiprocessing
    from os.path import join as opj

    from ost import Sentinel1_Scene as S1Scene
    from ost.helpers import onda

    # create list of scenes
    scenes = inventory_df['identifier'].tolist()

    # retry until everything is flagged as downloaded, at most 10 times
    check, i = False, 1
    while check is False and i <= 10:

        download_list = []
        for scene_id in scenes:

            scene = S1Scene(scene_id)
            filepath = scene._download_path(download_dir, True)

            try:
                uuid = (inventory_df['uuid'][
                    inventory_df['identifier'] == scene_id].tolist())
            except KeyError:
                uuid = scene.ondadias_uuid(
                    opener=onda.connect(uname=uname, pword=pword))

            if os.path.exists('{}.downloaded'.format(filepath)):
                print(' INFO: {} is already downloaded.'.format(
                    scene.scene_id))
            else:
                # create list objects for download
                download_list.append([uuid[0], filepath, uname, pword])

        if download_list:
            pool = multiprocessing.Pool(processes=concurrent)
            pool.map(s1_download, download_list)

        downloaded_scenes = glob.glob(
            opj(download_dir, 'SAR', '*', '20*', '*', '*',
                '*.zip.downloaded'))

        if len(inventory_df['identifier'].tolist()) == len(downloaded_scenes):
            print(' INFO: All products are downloaded.')
            check = True
        else:
            check = False
            # iterate over a copy so that removing finished scenes does not
            # skip entries of the list being iterated
            for scene in scenes.copy():
                scene = S1Scene(scene)
                filepath = scene._download_path(download_dir)

                if os.path.exists('{}.downloaded'.format(filepath)):
                    scenes.remove(scene.scene_id)

        i += 1
def batch_download(inventory_df, download_dir, uname, pword, concurrent=2):

    import multiprocessing

    from ost import Sentinel1Scene as S1Scene
    from ost.helpers import onda

    # create list of scenes
    scenes = inventory_df['identifier'].tolist()

    # retry until everything is flagged as downloaded, at most 10 times
    check, i = False, 1
    while check is False and i <= 10:

        download_list = []
        for scene_id in scenes:

            scene = S1Scene(scene_id)
            file_path = scene.download_path(download_dir, True)

            try:
                uuid = (inventory_df['uuid'][
                    inventory_df['identifier'] == scene_id].tolist())
            except KeyError:
                uuid = scene.ondadias_uuid(
                    opener=onda.connect(uname=uname, pword=pword))

            if file_path.with_suffix('.downloaded').exists():
                logger.info(f'{scene.scene_id} is already downloaded.')
            else:
                # create list objects for download
                download_list.append([uuid[0], file_path, uname, pword])

        if download_list:
            pool = multiprocessing.Pool(processes=concurrent)
            pool.map(onda_download, download_list)

        downloaded_scenes = list(download_dir.glob('**/*.downloaded'))

        if len(inventory_df['identifier'].tolist()) == len(downloaded_scenes):
            logger.info('All products are downloaded.')
            check = True
        else:
            check = False
            # iterate over a copy so that removing finished scenes does not
            # skip entries of the list being iterated
            for scene in scenes.copy():
                scene = S1Scene(scene)
                file_path = scene.download_path(download_dir)

                # only drop scenes whose .downloaded flag file actually exists
                if file_path.with_suffix('.downloaded').exists():
                    scenes.remove(scene.scene_id)

        i += 1
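# Usage sketch (not part of the original module): driving batch_download() with
# a small inventory DataFrame holding 'identifier' and 'uuid' columns. The scene
# identifier, uuid, credentials and download directory below are placeholders;
# in OST the inventory is normally produced by the search and refinement steps.
if __name__ == '__main__':
    from pathlib import Path
    import pandas as pd

    inventory_df = pd.DataFrame({
        'identifier': ['S1A_IW_GRDH_1SDV_...'],            # real scene ids
        'uuid': ['00000000-0000-0000-0000-000000000000'],  # matching ONDA uuids
    })

    batch_download(
        inventory_df=inventory_df,
        download_dir=Path.home() / 'ost_downloads',
        uname='onda_username',   # placeholder credentials
        pword='onda_password',
        concurrent=2,
    )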
def download(self, download_dir, mirror=None):

    if not mirror:
        print(' INFO: One or more of your scenes need to be downloaded.')
        print(' Select the server from where you want to download:')
        print(' (1) Copernicus Apihub (ESA, rolling archive)')
        print(' (2) Alaska Satellite Facility (NASA, full archive)')
        print(' (3) PEPS (CNES, 1 year rolling archive)')
        print(' (4) ONDA DIAS (ONDA DIAS full archive for SLC'
              ' - or GRD from 30 June 2019)')
        print(' (5) Alaska Satellite Facility (using WGET'
              ' - unstable - use only if 2 fails)')
        mirror = input(' Type 1, 2, 3, 4 or 5: ')

    from ost.s1 import download

    if mirror == '1':
        uname, pword = scihub.ask_credentials()
        opener = scihub.connect(uname=uname, pword=pword)
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.scihub_uuid(opener)]
        })
    elif mirror == '3':
        uname, pword = peps.ask_credentials()
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.peps_uuid(uname=uname, pword=pword)]
        })
    elif mirror == '4':
        uname, pword = onda.ask_credentials()
        opener = onda.connect(uname=uname, pword=pword)
        df = pd.DataFrame({
            'identifier': [self.scene_id],
            'uuid': [self.ondadias_uuid(opener)]
        })
    else:  # ASF
        df = pd.DataFrame({'identifier': [self.scene_id]})
        download.download_sentinel1(df, download_dir, mirror)
        return

    download.download_sentinel1(df, download_dir, mirror,
                                uname=uname, pword=pword)

    del uname, pword