## Imports required by the functions below (added here; the LiCSBAS_* aliases
## follow the upstream LiCSBAS convention).
import datetime as dt
import getopt
import glob
import multiprocessing as multi
import os
import re
import shutil
import subprocess as subp
import sys
import time

import numpy as np
import requests
from bs4 import BeautifulSoup
from osgeo import gdal

import LiCSBAS_io_lib as io_lib
import LiCSBAS_plot_lib as plot_lib
import LiCSBAS_tools_lib as tools_lib


def download_wrapper(args):
    """Download one file; used as a multiprocessing worker."""
    i, ifgd, n_dl, url_data, path_data = args
    dir_data = os.path.dirname(path_data)
    print('  Downloading {} ({}/{})...'.format(ifgd, i + 1, n_dl), flush=True)
    if not os.path.exists(dir_data):
        os.mkdir(dir_data)
    tools_lib.download_data(url_data, path_data)
    return
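## `Usage` is raised throughout the main() variants below but never defined in
## this section. A minimal sketch consistent with how it is used (the option
## parser reads err.msg); this follows the standard getopt error-handling
## recipe and is an assumption, not necessarily the verbatim upstream class.
class Usage(Exception):
    """Raised when the command-line options cannot be parsed."""
    def __init__(self, msg):
        self.msg = msg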
def main(argv=None):

    #%% Check argv
    if argv is None:
        argv = sys.argv

    start = time.time()
    print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])),
          flush=True)

    #%% Set default
    frameID = []
    startdate = 20141001
    enddate = int(dt.date.today().strftime("%Y%m%d"))

    #%% Read options
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hf:s:e:", ["help"])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o == '-h' or o == '--help':
                print(__doc__)
                return 0
            elif o == '-f':
                frameID = a
            elif o == '-s':
                startdate = int(a)
            elif o == '-e':
                enddate = int(a)
    except Usage as err:
        print("\nERROR:", file=sys.stderr, end='')
        print("  " + str(err.msg), file=sys.stderr)
        print("\nFor help, use -h or --help.\n", file=sys.stderr)
        return 2

    #%% Determine frameID
    wd = os.getcwd()
    if not frameID:  ## if frameID not indicated
        _tmp = re.findall(r'\d{3}[AD]_\d{5}_\d{6}', wd)  ## e.g., 021D_04972_131213
        if len(_tmp) == 0:
            print('\nFrame ID cannot be identified from dir name!',
                  file=sys.stderr)
            print('Use -f option', file=sys.stderr)
            return
        else:
            frameID = _tmp[0]
            print('\nFrame ID is {}\n'.format(frameID), flush=True)
    trackID = str(int(frameID[0:3]))

    #%% Directory and file setting
    outdir = os.path.join(wd, 'GEOC')
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    os.chdir(outdir)

    LiCSARweb = 'http://gws-access.ceda.ac.uk/public/nceo_geohazards/LiCSAR_products/'

    #%% ENU
    for ENU in ['E', 'N', 'U']:
        enutif = '{}.geo.{}.tif'.format(frameID, ENU)
        if os.path.exists(enutif):
            print('{} already exists. Skip download.'.format(enutif),
                  flush=True)
            continue

        print('Download {}'.format(enutif), flush=True)
        url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', enutif)
        if not tools_lib.download_data(url, enutif):
            print('  Error while downloading from {}'.format(url),
                  file=sys.stderr, flush=True)
            continue

    #%% baselines
    print('Download baselines', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', 'baselines')
    if not tools_lib.download_data(url, 'baselines'):
        print('  Error while downloading from {}'.format(url),
              file=sys.stderr, flush=True)

    #%% unw and cc
    ### Get available dates
    print('\nDownload geotiff of unw and cc', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'products')
    response = requests.get(url)
    response.encoding = response.apparent_encoding  ## avoid garble
    html_doc = response.text
    soup = BeautifulSoup(html_doc, "html.parser")
    tags = soup.find_all(href=re.compile(r"\d{8}_\d{8}"))
    ifgdates_all = [tag.get("href")[0:17] for tag in tags]

    ### Extract during start_date to end_date
    ifgdates = []
    for ifgd in ifgdates_all:
        mimd = int(ifgd[:8])
        simd = int(ifgd[-8:])
        if mimd >= startdate and simd <= enddate:
            ifgdates.append(ifgd)
    n_ifg = len(ifgdates)
    imdates = tools_lib.ifgdates2imdates(ifgdates)
    print('{} IFGs available from {} to {}'.format(n_ifg, imdates[0],
                                                   imdates[-1]), flush=True)

    ### Download
    for i, ifgd in enumerate(ifgdates):
        print('  Downloading {} ({}/{})...'.format(ifgd, i + 1, n_ifg),
              flush=True)
        url_unw = os.path.join(url, ifgd, ifgd + '.geo.unw.tif')
        path_unw = os.path.join(ifgd, ifgd + '.geo.unw.tif')
        if not os.path.exists(ifgd):
            os.mkdir(ifgd)
        if os.path.exists(path_unw):
            print('    {}.geo.unw.tif already exists. Skip.'.format(ifgd),
                  flush=True)
        elif not tools_lib.download_data(url_unw, path_unw):
            print('  Error while downloading from {}'.format(url_unw),
                  file=sys.stderr, flush=True)

        url_cc = os.path.join(url, ifgd, ifgd + '.geo.cc.tif')
        path_cc = os.path.join(ifgd, ifgd + '.geo.cc.tif')
        if os.path.exists(path_cc):
            print('    {}.geo.cc.tif already exists. Skip.'.format(ifgd),
                  flush=True)
        elif not tools_lib.download_data(url_cc, path_cc):
            print('  Error while downloading from {}'.format(url_cc),
                  file=sys.stderr, flush=True)

    #%% Finish
    elapsed_time = time.time() - start
    hour = int(elapsed_time / 3600)
    minute = int(np.mod((elapsed_time / 60), 60))
    sec = int(np.mod(elapsed_time, 60))
    print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour, minute, sec))

    print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0])))
    print('Output directory: {}\n'.format(outdir))
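## The main() above calls tools_lib.ifgdates2imdates() to reduce the
## 'YYYYMMDD_YYYYMMDD' interferogram names to sorted unique epoch dates.
## A hedged sketch of that helper, with behavior inferred from its use here
## (the upstream LiCSBAS_tools_lib implementation may differ in detail):
def ifgdates2imdates(ifgdates):
    """Return the sorted unique image (epoch) dates in a list of ifg names."""
    primarylist = [ifgd[:8] for ifgd in ifgdates]
    secondarylist = [ifgd[-8:] for ifgd in ifgdates]
    return sorted(set(primarylist + secondarylist))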
def main(argv=None):

    #%% Check argv
    if argv is None:
        argv = sys.argv

    start = time.time()
    ver = 1.2
    date = 20200227
    author = "Y. Morishita"
    print("\n{} ver{} {} {}".format(os.path.basename(argv[0]), ver, date,
                                    author), flush=True)
    print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])),
          flush=True)

    #%% Set default
    frameID = []
    startdate = 20141001
    enddate = int(dt.date.today().strftime("%Y%m%d"))
    get_gacos = False

    #%% Read options
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hf:s:e:",
                                       ["help", "get_gacos"])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o == '-h' or o == '--help':
                print(__doc__)
                return 0
            elif o == '-f':
                frameID = a
            elif o == '-s':
                startdate = int(a)
            elif o == '-e':
                enddate = int(a)
            elif o == '--get_gacos':
                get_gacos = True
    except Usage as err:
        print("\nERROR:", file=sys.stderr, end='')
        print("  " + str(err.msg), file=sys.stderr)
        print("\nFor help, use -h or --help.\n", file=sys.stderr)
        return 2

    #%% Determine frameID
    wd = os.getcwd()
    if not frameID:  ## if frameID not indicated
        _tmp = re.findall(r'\d{3}[AD]_\d{5}_\d{6}', wd)  ## e.g., 021D_04972_131213
        if len(_tmp) == 0:
            print('\nFrame ID cannot be identified from dir name!',
                  file=sys.stderr)
            print('Use -f option', file=sys.stderr)
            return
        else:
            frameID = _tmp[0]
            print('\nFrame ID is {}\n'.format(frameID), flush=True)
    trackID = str(int(frameID[0:3]))

    #%% Directory and file setting
    outdir = os.path.join(wd, 'GEOC')
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    os.chdir(outdir)

    LiCSARweb = 'http://gws-access.ceda.ac.uk/public/nceo_geohazards/LiCSAR_products/'

    #%% ENU and hgt
    for ENU in ['E', 'N', 'U', 'hgt']:
        enutif = '{}.geo.{}.tif'.format(frameID, ENU)
        if os.path.exists(enutif):
            print('{} already exists. Skip download.'.format(enutif),
                  flush=True)
            continue

        print('Download {}'.format(enutif), flush=True)
        url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', enutif)
        if not tools_lib.download_data(url, enutif):
            print('  Error while downloading from {}'.format(url),
                  file=sys.stderr, flush=True)
            continue

    #%% baselines and metadata.txt
    print('Download baselines', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', 'baselines')
    if not tools_lib.download_data(url, 'baselines'):
        print('  Error while downloading from {}'.format(url),
              file=sys.stderr, flush=True)

    print('Download metadata.txt', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', 'metadata.txt')
    if not tools_lib.download_data(url, 'metadata.txt'):
        print('  Error while downloading from {}'.format(url),
              file=sys.stderr, flush=True)

    #%% mli
    ### Get available dates
    url = os.path.join(LiCSARweb, trackID, frameID, 'products', 'epochs')
    response = requests.get(url)
    if not response.ok:  ## Try new structure
        url = os.path.join(LiCSARweb, trackID, frameID, 'epochs')
        response = requests.get(url)
    response.encoding = response.apparent_encoding  ## avoid garble
    html_doc = response.text
    soup = BeautifulSoup(html_doc, "html.parser")
    tags = soup.find_all(href=re.compile(r"\d{8}"))
    imdates_all = [tag.get("href")[0:8] for tag in tags]
    _imdates = np.int32(np.array(imdates_all))
    _imdates = (_imdates[(_imdates >= startdate) *
                         (_imdates <= enddate)]).astype('str').tolist()

    ## Find earliest date in which mli is available
    imd1 = []
    for imd in _imdates:
        url_mli = os.path.join(url, imd, imd + '.geo.mli.tif')
        response = requests.get(url_mli)
        if response.ok:
            imd1 = imd
            break

    ### Download
    if imd1:
        print('Downloading {}.geo.mli.tif as {}.geo.mli.tif...'.format(
            imd1, frameID), flush=True)
        url_mli = os.path.join(url, imd1, imd1 + '.geo.mli.tif')
        mlitif = frameID + '.geo.mli.tif'
        if os.path.exists(mlitif):
            print('  {} already exists. Skip.'.format(mlitif), flush=True)
        elif not tools_lib.download_data(url_mli, mlitif):
            print('  Error while downloading from {}'.format(url_mli),
                  file=sys.stderr, flush=True)
    else:
        print('No mli available on {}'.format(url), file=sys.stderr,
              flush=True)

    #%% GACOS if specified
    if get_gacos:
        gacosdir = os.path.join(wd, 'GACOS')
        if not os.path.exists(gacosdir):
            os.mkdir(gacosdir)

        ### Get available dates
        print('\nDownload GACOS data', flush=True)
        url = os.path.join(LiCSARweb, trackID, frameID, 'epochs')
        response = requests.get(url)
        response.encoding = response.apparent_encoding  ## avoid garble
        html_doc = response.text
        soup = BeautifulSoup(html_doc, "html.parser")
        tags = soup.find_all(href=re.compile(r"\d{8}"))
        imdates_all = [tag.get("href")[0:8] for tag in tags]
        _imdates = np.int32(np.array(imdates_all))
        _imdates = (_imdates[(_imdates >= startdate) *
                             (_imdates <= enddate)]).astype('str').tolist()

        ### Extract available dates
        imdates = []
        for imd in _imdates:
            url_sltd = os.path.join(url, imd, imd + '.sltd.geo.tif')
            response = requests.get(url_sltd)
            if response.ok:
                imdates.append(imd)
        n_im = len(imdates)
        if n_im > 0:
            print('{} GACOS data available from {} to {}'.format(
                n_im, imdates[0], imdates[-1]), flush=True)
        else:
            print('No GACOS data available from {} to {}'.format(
                startdate, enddate), flush=True)

        ### Download
        for i, imd in enumerate(imdates):
            print('  Downloading {} ({}/{})...'.format(imd, i + 1, n_im),
                  flush=True)
            url_sltd = os.path.join(url, imd, imd + '.sltd.geo.tif')
            path_sltd = os.path.join(gacosdir, imd + '.sltd.geo.tif')
            if os.path.exists(path_sltd):
                print('    {}.sltd.geo.tif already exists. Skip.'.format(imd),
                      flush=True)
            elif not tools_lib.download_data(url_sltd, path_sltd):
                print('  Error while downloading from {}'.format(url_sltd),
                      file=sys.stderr, flush=True)

    #%% unw and cc
    ### Get available dates
    print('\nDownload geotiff of unw and cc', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'products')
    response = requests.get(url)
    if not response.ok:  ## Try new structure
        url = os.path.join(LiCSARweb, trackID, frameID, 'interferograms')
        response = requests.get(url)
    response.encoding = response.apparent_encoding  ## avoid garble
    html_doc = response.text
    soup = BeautifulSoup(html_doc, "html.parser")
    tags = soup.find_all(href=re.compile(r"\d{8}_\d{8}"))
    ifgdates_all = [tag.get("href")[0:17] for tag in tags]

    ### Extract during start_date to end_date
    ifgdates = []
    for ifgd in ifgdates_all:
        mimd = int(ifgd[:8])
        simd = int(ifgd[-8:])
        if mimd >= startdate and simd <= enddate:
            ifgdates.append(ifgd)
    n_ifg = len(ifgdates)
    imdates = tools_lib.ifgdates2imdates(ifgdates)
    print('{} IFGs available from {} to {}'.format(n_ifg, imdates[0],
                                                   imdates[-1]), flush=True)

    ### Download
    for i, ifgd in enumerate(ifgdates):
        print('  Downloading {} ({}/{})...'.format(ifgd, i + 1, n_ifg),
              flush=True)
        url_unw = os.path.join(url, ifgd, ifgd + '.geo.unw.tif')
        path_unw = os.path.join(ifgd, ifgd + '.geo.unw.tif')
        if not os.path.exists(ifgd):
            os.mkdir(ifgd)
        if os.path.exists(path_unw):
            print('    {}.geo.unw.tif already exists. Skip.'.format(ifgd),
                  flush=True)
        elif not tools_lib.download_data(url_unw, path_unw):
            print('  Error while downloading from {}'.format(url_unw),
                  file=sys.stderr, flush=True)

        url_cc = os.path.join(url, ifgd, ifgd + '.geo.cc.tif')
        path_cc = os.path.join(ifgd, ifgd + '.geo.cc.tif')
        if os.path.exists(path_cc):
            print('    {}.geo.cc.tif already exists. Skip.'.format(ifgd),
                  flush=True)
        elif not tools_lib.download_data(url_cc, path_cc):
            print('  Error while downloading from {}'.format(url_cc),
                  file=sys.stderr, flush=True)

    #%% Finish
    elapsed_time = time.time() - start
    hour = int(elapsed_time / 3600)
    minute = int(np.mod((elapsed_time / 60), 60))
    sec = int(np.mod(elapsed_time, 60))
    print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour, minute, sec))

    print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0])))
    print('Output directory: {}\n'.format(outdir))
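## Every variant of main() treats tools_lib.download_data(url, path) as a
## boolean: a falsy return means the download failed. A minimal sketch of such
## a helper, assuming a plain requests-based fetch (the upstream version may
## add retries, timestamp preservation, or other fallbacks):
def download_data(url, file_local):
    """Fetch url into file_local; return True on success, False on failure."""
    try:
        response = requests.get(url)
        response.raise_for_status()
        with open(file_local, "wb") as f:
            f.write(response.content)
        return True
    except Exception as e:
        print('  Download failed for {}: {}'.format(url, e),
              file=sys.stderr, flush=True)
        return False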
def main(argv=None):

    #%% Check argv
    if argv is None:
        argv = sys.argv

    start = time.time()
    ver = 1.6
    date = 20200911
    author = "Y. Morishita"
    print("\n{} ver{} {} {}".format(os.path.basename(argv[0]), ver, date,
                                    author), flush=True)
    print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])),
          flush=True)

    #%% Set default
    frameID = []
    startdate = 20141001
    enddate = int(dt.date.today().strftime("%Y%m%d"))
    get_gacos = False
    n_para = 4

    #%% Read options
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hf:s:e:",
                                       ["help", "get_gacos", "n_para="])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o == '-h' or o == '--help':
                print(__doc__)
                return 0
            elif o == '-f':
                frameID = a
            elif o == '-s':
                startdate = int(a)
            elif o == '-e':
                enddate = int(a)
            elif o == '--get_gacos':
                get_gacos = True
            elif o == '--n_para':
                n_para = int(a)
    except Usage as err:
        print("\nERROR:", file=sys.stderr, end='')
        print("  " + str(err.msg), file=sys.stderr)
        print("\nFor help, use -h or --help.\n", file=sys.stderr)
        return 2

    #%% Determine frameID
    wd = os.getcwd()
    if not frameID:  ## if frameID not indicated
        _tmp = re.findall(r'\d{3}[AD]_\d{5}_\d{6}', wd)  ## e.g., 021D_04972_131213
        if len(_tmp) == 0:
            print('\nFrame ID cannot be identified from dir name!',
                  file=sys.stderr)
            print('Use -f option', file=sys.stderr)
            return
        else:
            frameID = _tmp[0]
            print('\nFrame ID is {}\n'.format(frameID), flush=True)
    else:
        print('\nFrame ID is {}\n'.format(frameID), flush=True)
    trackID = str(int(frameID[0:3]))

    #%% Directory and file setting
    outdir = os.path.join(wd, 'GEOC')
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    os.chdir(outdir)

    LiCSARweb = 'http://gws-access.ceda.ac.uk/public/nceo_geohazards/LiCSAR_products/'

    #%% ENU and hgt
    for ENU in ['E', 'N', 'U', 'hgt']:
        enutif = '{}.geo.{}.tif'.format(frameID, ENU)
        url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', enutif)
        if os.path.exists(enutif):
            rc = tools_lib.comp_size_time(url, enutif)
            if rc == 0:
                print('{} already exists. Skip download.'.format(enutif),
                      flush=True)
                continue
            elif rc == 3:
                print('{} not available. Skip download.'.format(enutif),
                      flush=True)
                continue
            else:
                if rc == 1:
                    print("Size of {} is not identical.".format(enutif))
                elif rc == 2:
                    print("Newer {} available.".format(enutif))

        print('Download {}'.format(enutif), flush=True)
        tools_lib.download_data(url, enutif)

    #%% baselines and metadata.txt
    print('Download baselines', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', 'baselines')
    tools_lib.download_data(url, 'baselines')

    print('Download metadata.txt', flush=True)
    url = os.path.join(LiCSARweb, trackID, frameID, 'metadata', 'metadata.txt')
    tools_lib.download_data(url, 'metadata.txt')

    #%% mli
    mlitif = frameID + '.geo.mli.tif'
    if os.path.exists(mlitif):
        print('{} already exists. Skip.'.format(mlitif), flush=True)
    else:
        ### Get available dates
        print('Searching earliest epoch for mli...', flush=True)
        url = os.path.join(LiCSARweb, trackID, frameID, 'epochs')
        response = requests.get(url)
        response.encoding = response.apparent_encoding  ## avoid garble
        html_doc = response.text
        soup = BeautifulSoup(html_doc, "html.parser")
        tags = soup.find_all(href=re.compile(r"\d{8}"))
        imdates_all = [tag.get("href")[0:8] for tag in tags]
        _imdates = np.int32(np.array(imdates_all))
        _imdates = (_imdates[(_imdates >= startdate) *
                             (_imdates <= enddate)]).astype('str').tolist()

        ## Find earliest date in which mli is available
        imd1 = []
        for i, imd in enumerate(_imdates):
            if np.mod(i, 10) == 0:
                print("\r  {0:3}/{1:3}".format(i, len(_imdates)), end='',
                      flush=True)
            url_epoch = os.path.join(url, imd)
            response = requests.get(url_epoch)
            response.encoding = response.apparent_encoding  ## avoid garble
            html_doc = response.text
            soup = BeautifulSoup(html_doc, "html.parser")
            tag = soup.find(href=re.compile(r"\d{8}.geo.mli.tif"))
            if tag is not None:
                print('\n{} found as earliest.'.format(imd))
                imd1 = imd
                break

        ### Download
        if imd1:
            print('Downloading {}.geo.mli.tif as {}.geo.mli.tif...'.format(
                imd1, frameID), flush=True)
            url_mli = os.path.join(url, imd1, imd1 + '.geo.mli.tif')
            tools_lib.download_data(url_mli, mlitif)
        else:
            print('\nNo mli available on {}'.format(url), file=sys.stderr,
                  flush=True)

    #%% GACOS if specified
    if get_gacos:
        gacosdir = os.path.join(wd, 'GACOS')
        if not os.path.exists(gacosdir):
            os.mkdir(gacosdir)

        ### Get available dates
        print('\nDownload GACOS data', flush=True)
        url = os.path.join(LiCSARweb, trackID, frameID, 'epochs')
        response = requests.get(url)
        response.encoding = response.apparent_encoding  ## avoid garble
        html_doc = response.text
        soup = BeautifulSoup(html_doc, "html.parser")
        tags = soup.find_all(href=re.compile(r"\d{8}"))
        imdates_all = [tag.get("href")[0:8] for tag in tags]
        _imdates = np.int32(np.array(imdates_all))
        _imdates = (_imdates[(_imdates >= startdate) *
                             (_imdates <= enddate)]).astype('str').tolist()
        print('  There are {} epochs from {} to {}'.format(
            len(_imdates), startdate, enddate), flush=True)

        ### Extract available dates
        print('  Searching available epochs ({} parallel)...'.format(n_para),
              flush=True)
        args = [(i, len(_imdates),
                 os.path.join(url, imd, '{}.sltd.geo.tif'.format(imd)),
                 os.path.join(gacosdir, imd + '.sltd.geo.tif'))
                for i, imd in enumerate(_imdates)]
        p = multi.Pool(n_para)
        rc = p.map(check_gacos_wrapper, args)
        p.close()

        n_im_existing = 0
        n_im_unavailable = 0
        imdates_dl = []
        for i, rc1 in enumerate(rc):
            if rc1 == 0:  ## No need to download
                n_im_existing = n_im_existing + 1
            elif rc1 == 3 or rc1 == 5:  ## Cannot download
                n_im_unavailable = n_im_unavailable + 1
            elif rc1 == 1 or rc1 == 2 or rc1 == 4:  ## Need download
                imdates_dl.append(_imdates[i])

        n_im_dl = len(imdates_dl)
        if n_im_existing > 0:
            print('  {} GACOS data already downloaded'.format(n_im_existing),
                  flush=True)
        if n_im_unavailable > 0:
            print('  {} GACOS data unavailable'.format(n_im_unavailable),
                  flush=True)

        ### Download
        if n_im_dl > 0:
            print('{} GACOS data will be downloaded'.format(n_im_dl),
                  flush=True)
            print('Download GACOS ({} parallel)...'.format(n_para), flush=True)
            args = [(i, imd, n_im_dl,
                     os.path.join(url, imd, '{}.sltd.geo.tif'.format(imd)),
                     os.path.join(gacosdir, '{}.sltd.geo.tif'.format(imd)))
                    for i, imd in enumerate(imdates_dl)]
            p = multi.Pool(n_para)
            p.map(download_wrapper, args)
            p.close()
        else:
            print('No GACOS data available from {} to {}'.format(
                startdate, enddate), flush=True)

    #%% unw and cc
    ### Get available dates
    print('\nDownload geotiff of unw and cc', flush=True)
    url_ifgdir = os.path.join(LiCSARweb, trackID, frameID, 'interferograms')
    response = requests.get(url_ifgdir)
    response.encoding = response.apparent_encoding  ## avoid garble
    html_doc = response.text
    soup = BeautifulSoup(html_doc, "html.parser")
    tags = soup.find_all(href=re.compile(r"\d{8}_\d{8}"))
    ifgdates_all = [tag.get("href")[0:17] for tag in tags]

    ### Extract during start_date to end_date
    ifgdates = []
    for ifgd in ifgdates_all:
        mimd = int(ifgd[:8])
        simd = int(ifgd[-8:])
        if mimd >= startdate and simd <= enddate:
            ifgdates.append(ifgd)
    n_ifg = len(ifgdates)
    imdates = tools_lib.ifgdates2imdates(ifgdates)
    print('{} IFGs available from {} to {}'.format(n_ifg, imdates[0],
                                                   imdates[-1]), flush=True)

    ### Check if both unw and cc are already downloaded, new, and same size
    print('Checking existing unw and cc ({} parallel, may take time)...'.format(
        n_para), flush=True)

    ## unw
    args = [(i, n_ifg,
             os.path.join(url_ifgdir, ifgd, '{}.geo.unw.tif'.format(ifgd)),
             os.path.join(ifgd, '{}.geo.unw.tif'.format(ifgd)))
            for i, ifgd in enumerate(ifgdates)]
    p = multi.Pool(n_para)
    rc = p.map(check_exist_wrapper, args)
    p.close()

    n_unw_existing = 0
    unwdates_dl = []
    for i, rc1 in enumerate(rc):
        if rc1 == 0:  ## No need to download
            n_unw_existing = n_unw_existing + 1
        elif rc1 == 3 or rc1 == 5:  ## Cannot download
            print('  {}.geo.unw.tif not available.'.format(ifgdates[i]),
                  flush=True)
        elif rc1 == 1 or rc1 == 2 or rc1 == 4:  ## Need download
            unwdates_dl.append(ifgdates[i])

    ## cc
    args = [(i, n_ifg,
             os.path.join(url_ifgdir, ifgd, '{}.geo.cc.tif'.format(ifgd)),
             os.path.join(ifgd, '{}.geo.cc.tif'.format(ifgd)))
            for i, ifgd in enumerate(ifgdates)]
    p = multi.Pool(n_para)
    rc = p.map(check_exist_wrapper, args)
    p.close()

    n_cc_existing = 0
    ccdates_dl = []
    for i, rc1 in enumerate(rc):
        if rc1 == 0:  ## No need to download
            n_cc_existing = n_cc_existing + 1
        elif rc1 == 3 or rc1 == 5:  ## Cannot download
            print('  {}.geo.cc.tif not available.'.format(ifgdates[i]),
                  flush=True)
        elif rc1 == 1 or rc1 == 2 or rc1 == 4:  ## Need download
            ccdates_dl.append(ifgdates[i])

    n_unw_dl = len(unwdates_dl)
    n_cc_dl = len(ccdates_dl)
    print('{} unw already downloaded'.format(n_unw_existing), flush=True)
    print('{} unw will be downloaded'.format(n_unw_dl), flush=True)
    print('{} cc already downloaded'.format(n_cc_existing), flush=True)
    print('{} cc will be downloaded'.format(n_cc_dl), flush=True)

    ### Download unw in parallel
    if n_unw_dl != 0:
        print('Download unw ({} parallel)...'.format(n_para), flush=True)
        args = [(i, ifgd, n_unw_dl,
                 os.path.join(url_ifgdir, ifgd, '{}.geo.unw.tif'.format(ifgd)),
                 os.path.join(ifgd, '{}.geo.unw.tif'.format(ifgd)))
                for i, ifgd in enumerate(unwdates_dl)]
        p = multi.Pool(n_para)
        p.map(download_wrapper, args)
        p.close()

    ### Download cc in parallel
    if n_cc_dl != 0:
        print('Download cc ({} parallel)...'.format(n_para), flush=True)
        args = [(i, ifgd, n_cc_dl,
                 os.path.join(url_ifgdir, ifgd, '{}.geo.cc.tif'.format(ifgd)),
                 os.path.join(ifgd, '{}.geo.cc.tif'.format(ifgd)))
                for i, ifgd in enumerate(ccdates_dl)]
        p = multi.Pool(n_para)
        p.map(download_wrapper, args)
        p.close()

    #%% Finish
    elapsed_time = time.time() - start
    hour = int(elapsed_time / 3600)
    minute = int(np.mod((elapsed_time / 60), 60))
    sec = int(np.mod(elapsed_time, 60))
    print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour, minute, sec))

    print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0])))
    print('Output directory: {}\n'.format(outdir))
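## The parallel checks above map check_exist_wrapper / check_gacos_wrapper over
## (index, total, remote_url, local_path) tuples and branch on integer return
## codes: 0 = local up to date, 1 = size differs, 2 = newer remote available,
## 3 = remote missing, 4 = local missing, 5 = neither exists. A hedged sketch,
## assuming tools_lib.comp_size_time(url, path) returns exactly these codes as
## the rc handling in main() suggests:
def check_exist_wrapper(args):
    """Compare one remote file with its local copy; return the rc code."""
    i, n_data, url_data, path_data = args
    if np.mod(i, 10) == 0:  ## print progress every 10 files
        print("  {0:3}/{1:3}".format(i, n_data), end='\r', flush=True)
    return tools_lib.comp_size_time(url_data, path_data)


## check_gacos_wrapper is assumed to perform the same remote/local comparison
## for the GACOS *.sltd.geo.tif files; a sketch reusing the checker above:
def check_gacos_wrapper(args):
    """Same size/time check for GACOS sltd files; 3/5 mean 'cannot download'."""
    return check_exist_wrapper(args)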
def main(argv=None):

    #%% Check argv
    if argv is None:
        argv = sys.argv

    start = time.time()
    ver = "1.6"
    date = 20201008
    author = "Y. Morishita"
    print("\n{} ver{} {} {}".format(os.path.basename(argv[0]), ver, date,
                                    author), flush=True)
    print("{} {}".format(os.path.basename(argv[0]), ' '.join(argv[1:])),
          flush=True)

    ### For parallel processing
    global ifgdates2, geocdir, outdir, nlook, n_valid_thre, cycle, cmap

    #%% Set default
    geocdir = []
    outdir = []
    nlook = 1
    frameID = []
    radar_freq = 5.405e9
    n_para = len(os.sched_getaffinity(0))
    cmap = 'insar'
    cycle = 3
    n_valid_thre = 0.5

    #%% Read options
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hi:o:n:f:",
                                       ["help", "freq=", "n_para="])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o == '-h' or o == '--help':
                print(__doc__)
                return 0
            elif o == '-i':
                geocdir = a
            elif o == '-o':
                outdir = a
            elif o == '-n':
                nlook = int(a)
            elif o == '-f':
                frameID = a
            elif o == '--freq':
                radar_freq = float(a)
            elif o == '--n_para':
                n_para = int(a)

        if not geocdir:
            raise Usage('No GEOC directory given, -i is not optional!')
        elif not os.path.isdir(geocdir):
            raise Usage('No {} dir exists!'.format(geocdir))
    except Usage as err:
        print("\nERROR:", file=sys.stderr, end='')
        print("  " + str(err.msg), file=sys.stderr)
        print("\nFor help, use -h or --help.\n", file=sys.stderr)
        return 2

    #%% Directory and file setting
    geocdir = os.path.abspath(geocdir)
    if not outdir:
        outdir = os.path.join(os.path.dirname(geocdir),
                              'GEOCml{}'.format(nlook))
    if not os.path.exists(outdir):
        os.mkdir(outdir)

    mlipar = os.path.join(outdir, 'slc.mli.par')
    dempar = os.path.join(outdir, 'EQA.dem_par')

    no_unw_list = os.path.join(outdir, 'no_unw_list.txt')
    if os.path.exists(no_unw_list):
        os.remove(no_unw_list)

    bperp_file_in = os.path.join(geocdir, 'baselines')
    bperp_file_out = os.path.join(outdir, 'baselines')

    metadata_file = os.path.join(geocdir, 'metadata.txt')
    if os.path.exists(metadata_file):
        center_time = subp.check_output(['grep', 'center_time', metadata_file]
                                        ).decode().split('=')[1].strip()
    else:
        center_time = None

    LiCSARweb = 'http://gws-access.ceda.ac.uk/public/nceo_geohazards/LiCSAR_products/'

    ### Frame ID even if not used
    if not frameID:  ## if not specified
        _tmp = re.findall(r'\d{3}[AD]_\d{5}_\d{6}', geocdir)  ## e.g., 021D_04972_131213
        if len(_tmp) != 0:  ## if not found, keep []
            frameID = _tmp[0]
            trackID = str(int(frameID[0:3]))
    else:
        trackID = str(int(frameID[0:3]))

    #%% ENU
    for ENU in ['E', 'N', 'U']:
        print('\nCreate {}'.format(ENU + '.geo'), flush=True)
        enutif = glob.glob(os.path.join(geocdir, '*.geo.{}.tif'.format(ENU)))

        ### Download if not exist
        if len(enutif) == 0:
            print('  No *.geo.{}.tif found in {}'.format(
                ENU, os.path.basename(geocdir)), flush=True)

            if not frameID:  ## if frameID not found above
                print('  Frame ID cannot be identified from dir name!',
                      file=sys.stderr)
                print('  Use -f option if you need {}.geo'.format(ENU),
                      file=sys.stderr)
                continue

            ### Download tif
            url = os.path.join(LiCSARweb, trackID, frameID, 'metadata',
                               '{}.geo.{}.tif'.format(frameID, ENU))
            enutif = os.path.join(geocdir,
                                  '{}.geo.{}.tif'.format(frameID, ENU))
            if not tools_lib.download_data(url, enutif):
                print('  Error while downloading from {}'.format(url),
                      file=sys.stderr, flush=True)
                continue
            else:
                print('  {} downloaded from LiCSAR-portal'.format(
                    os.path.basename(url)), flush=True)
        else:
            enutif = enutif[0]  ## first one

        ### Create float
        data = gdal.Open(enutif).ReadAsArray()
        data[data == 0] = np.nan

        if nlook != 1:
            ### Multilook
            data = tools_lib.multilook(data, nlook, nlook)

        outfile = os.path.join(outdir, ENU + '.geo')
        data.tofile(outfile)
        print('  {}.geo created'.format(ENU), flush=True)

    #%% mli
    mlitif = os.path.join(geocdir, '{}.geo.mli.tif'.format(frameID))
    if os.path.exists(mlitif):
        print('\nCreate slc.mli', flush=True)
        mli = gdal.Open(mlitif).ReadAsArray()
        mli[mli == 0] = np.nan

        if nlook != 1:
            ### Multilook
            mli = tools_lib.multilook(mli, nlook, nlook)

        mlifile = os.path.join(outdir, 'slc.mli')
        mli.tofile(mlifile)
        mlipngfile = mlifile + '.png'
        vmin = np.nanpercentile(mli, 5)
        vmax = np.nanpercentile(mli, 95)
        plot_lib.make_im_png(mli, mlipngfile, 'gray', 'MLI', vmin, vmax,
                             cbar=True)
        print('  slc.mli[.png] created', flush=True)

    #%% hgt
    hgttif = os.path.join(geocdir, '{}.geo.hgt.tif'.format(frameID))
    if os.path.exists(hgttif):
        print('\nCreate hgt', flush=True)
        hgt = gdal.Open(hgttif).ReadAsArray()
        hgt[hgt == 0] = np.nan

        if nlook != 1:
            ### Multilook
            hgt = tools_lib.multilook(hgt, nlook, nlook)

        hgtfile = os.path.join(outdir, 'hgt')
        hgt.tofile(hgtfile)
        hgtpngfile = hgtfile + '.png'
        vmax = np.nanpercentile(hgt, 99)
        vmin = -vmax / 3  ## because 1/4 of the terrain colormap is blue
        plot_lib.make_im_png(hgt, hgtpngfile, 'terrain', 'DEM (m)', vmin, vmax,
                             cbar=True)
        print('  hgt[.png] created', flush=True)

    #%% tif -> float (with multilook/downsampling)
    print('\nCreate unw and cc', flush=True)
    ifgdates = tools_lib.get_ifgdates(geocdir)
    n_ifg = len(ifgdates)

    ### First check if floats already exist
    ifgdates2 = []
    for i, ifgd in enumerate(ifgdates):
        ifgdir1 = os.path.join(outdir, ifgd)
        unwfile = os.path.join(ifgdir1, ifgd + '.unw')
        ccfile = os.path.join(ifgdir1, ifgd + '.cc')
        if not (os.path.exists(unwfile) and os.path.exists(ccfile)):
            ifgdates2.append(ifgd)

    n_ifg2 = len(ifgdates2)
    if n_ifg - n_ifg2 > 0:
        print("  {0:3}/{1:3} unw and cc already exist. Skip".format(
            n_ifg - n_ifg2, n_ifg), flush=True)

    if n_ifg2 > 0:
        if n_para > n_ifg2:
            n_para = n_ifg2

        ### Create floats with parallel processing
        print('  {} parallel processing...'.format(n_para), flush=True)
        p = multi.Pool(n_para)
        rc = p.map(convert_wrapper, range(n_ifg2))
        p.close()

        ifgd_ok = []
        for i, _rc in enumerate(rc):
            if _rc == 1:
                with open(no_unw_list, 'a') as f:
                    print('{}'.format(ifgdates2[i]), file=f)
            elif _rc == 0:
                ifgd_ok = ifgdates2[i]  ## readable tiff

        ### Read info
        ## If all floats already exist, this will not be done, but no problem
        ## because the par files should already exist!
        if ifgd_ok:
            unw_tiffile = os.path.join(geocdir, ifgd_ok,
                                       ifgd_ok + '.geo.unw.tif')
            geotiff = gdal.Open(unw_tiffile)
            width = geotiff.RasterXSize
            length = geotiff.RasterYSize
            lon_w_p, dlon, _, lat_n_p, _, dlat = geotiff.GetGeoTransform()
            ## lat lon are in pixel registration. dlat is negative
            lon_w_g = lon_w_p + dlon / 2
            lat_n_g = lat_n_p + dlat / 2
            ## to grid registration by shifting half a pixel inside

            if nlook != 1:
                width = int(width / nlook)
                length = int(length / nlook)
                dlon = dlon * nlook
                dlat = dlat * nlook

    #%% EQA.dem_par, slc.mli.par
    if not os.path.exists(mlipar):
        print('\nCreate slc.mli.par', flush=True)
        # radar_freq = 5.405e9  ## fixed for Sentinel-1
        with open(mlipar, 'w') as f:
            print('range_samples:   {}'.format(width), file=f)
            print('azimuth_lines:   {}'.format(length), file=f)
            print('radar_frequency: {} Hz'.format(radar_freq), file=f)
            if center_time is not None:
                print('center_time: {}'.format(center_time), file=f)

    if not os.path.exists(dempar):
        print('\nCreate EQA.dem_par', flush=True)
        text = [
            "Gamma DIFF&GEO DEM/MAP parameter file",
            "title: DEM",
            "DEM_projection: EQA",
            "data_format: REAL*4",
            "DEM_hgt_offset: 0.00000",
            "DEM_scale: 1.00000",
            "width: {}".format(width),
            "nlines: {}".format(length),
            "corner_lat: {} decimal degrees".format(lat_n_g),
            "corner_lon: {} decimal degrees".format(lon_w_g),
            "post_lat: {} decimal degrees".format(dlat),
            "post_lon: {} decimal degrees".format(dlon),
            "",
            "ellipsoid_name: WGS 84",
            "ellipsoid_ra: 6378137.000 m",
            "ellipsoid_reciprocal_flattening: 298.2572236",
            "",
            "datum_name: WGS 1984",
            "datum_shift_dx: 0.000 m",
            "datum_shift_dy: 0.000 m",
            "datum_shift_dz: 0.000 m",
            "datum_scale_m: 0.00000e+00",
            "datum_rotation_alpha: 0.00000e+00 arc-sec",
            "datum_rotation_beta: 0.00000e+00 arc-sec",
            "datum_rotation_gamma: 0.00000e+00 arc-sec",
            "datum_country_list: Global Definition, WGS84, World\n",
        ]
        with open(dempar, 'w') as f:
            f.write('\n'.join(text))

    #%% bperp
    print('\nCopy baselines file', flush=True)
    imdates = tools_lib.ifgdates2imdates(ifgdates)
    if os.path.exists(bperp_file_in):
        ## Check existing bperp_file
        if not io_lib.read_bperp_file(bperp_file_in, imdates):
            print('  baselines file found, but not complete. Make dummy',
                  flush=True)
            io_lib.make_dummy_bperp(bperp_file_out, imdates)
        else:
            shutil.copyfile(bperp_file_in, bperp_file_out)
    else:
        print('  No valid baselines file exists.', flush=True)
        if not frameID:  ## if frameID not found above
            print('  Frame ID cannot be identified from dir name!')
            print('  Make dummy.', flush=True)
            io_lib.make_dummy_bperp(bperp_file_out, imdates)
        else:
            print('  Try download.', flush=True)
            url = os.path.join(LiCSARweb, trackID, frameID, 'metadata',
                               'baselines')
            if not tools_lib.download_data(url, bperp_file_out):
                print('  Error while downloading from {}.\n'
                      '  Make dummy.'.format(url), file=sys.stderr, flush=True)
                io_lib.make_dummy_bperp(bperp_file_out, imdates)
            else:
                print('  {} downloaded from LiCSAR-portal'.format(
                    os.path.basename(url)), flush=True)
                if not io_lib.read_bperp_file(bperp_file_out, imdates):
                    print('  but not complete. Make dummy.', flush=True)
                    io_lib.make_dummy_bperp(bperp_file_out, imdates)

    #%% Finish
    elapsed_time = time.time() - start
    hour = int(elapsed_time / 3600)
    minute = int(np.mod((elapsed_time / 60), 60))
    sec = int(np.mod(elapsed_time, 60))
    print("\nElapsed time: {0:02}h {1:02}m {2:02}s".format(hour, minute, sec))

    print('\n{} Successfully finished!!\n'.format(os.path.basename(argv[0])))
    print('Output directory: {}\n'.format(os.path.relpath(outdir)))
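## convert_wrapper(i), mapped in parallel above, converts the i-th entry of the
## global ifgdates2 from geotiff to flat float binaries, multilooking by the
## global nlook. A hedged sketch of its core logic (the upstream version also
## screens by n_valid_thre and writes quick-look PNGs using cmap/cycle, and may
## store cc as uint8; those details are omitted here):
def convert_wrapper(i):
    """Convert one ifg's unw/cc geotiffs to binary floats; 0 = ok, 1 = bad."""
    ifgd = ifgdates2[i]
    unw_tiffile = os.path.join(geocdir, ifgd, ifgd + '.geo.unw.tif')
    cc_tiffile = os.path.join(geocdir, ifgd, ifgd + '.geo.cc.tif')
    if not (os.path.exists(unw_tiffile) and os.path.exists(cc_tiffile)):
        return 1  ## tif pair missing
    try:
        unw = gdal.Open(unw_tiffile).ReadAsArray()
        cc = gdal.Open(cc_tiffile).ReadAsArray().astype(np.float32)
    except Exception:
        return 1  ## unreadable tif
    unw[unw == 0] = np.nan
    cc[cc == 0] = np.nan
    if nlook != 1:  ## multilook (downsample)
        unw = tools_lib.multilook(unw, nlook, nlook)
        cc = tools_lib.multilook(cc, nlook, nlook)
    ifgdir1 = os.path.join(outdir, ifgd)
    if not os.path.exists(ifgdir1):
        os.mkdir(ifgdir1)
    unw.astype(np.float32).tofile(os.path.join(ifgdir1, ifgd + '.unw'))
    cc.astype(np.float32).tofile(os.path.join(ifgdir1, ifgd + '.cc'))
    return 0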