def grab_subscription(api: API) -> Dict:
    """
    Displays all of the user's HyP3 subscriptions, prompts the user to select
    one of them, and returns the selected subscription.
    """
    data = api.get_subscriptions()
    if not data:
        print("This account has no subscriptions.")
        exit()
    for subscription in data:
        print(f"ID: {subscription['id']}: {subscription['name']}")
    print('Pick an id from the list above: ', end='')
    while True:
        try:
            user_input = int(input())
        except ValueError:
            print('Please insert an integer from above: ', end='')
            continue
        for subscription in data:
            if int(subscription['id']) == user_input:
                return subscription
        # Printed when the entered ID was not one of the user's subscriptions
        print("That id wasn't an option, please try again: ", end='')
def hyp3_login() -> API:
    """
    Takes the user's information to log in to NASA EarthData, updates .netrc
    with the user's credentials, then returns an API object.
    """
    username = ""
    password = ""
    try:
        with open('.netrc', 'r') as f:
            contents = f.read()
        username = contents.split(' ')[3]
        password = contents.split(' ')[5].split('\n')[0]
    except (IndexError, FileNotFoundError):
        pass
    error = None
    while True:
        if error:
            print(error)
            print('Please try again.\n')
        if username == "":
            # Assumed flow for the masked block: prompt for a username and
            # password, then attempt a login with the asf_hyp3 API client.
            print("Enter your NASA EarthData username: ", end='')
            username = input()
            print("Enter your NASA EarthData password: ", end='')
            password = getpass()
        try:
            api = API(username)
            api.login(password=password)
        except LoginError as e:
            error = e
            username = ""
            continue
        with open('.netrc', 'w+') as f:
            f.write(
                f"machine urs.earthdata.nasa.gov login {username} password {password}\n"
            )
        return api
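# A minimal usage sketch, assuming hyp3_login() and grab_subscription() are
# defined as above: log in to EarthData, let the user pick one of their HyP3
# subscriptions, and report the choice. The 'id' and 'name' keys match the
# fields grab_subscription() prints.
api = hyp3_login()
subscription = grab_subscription(api)
print(f"Selected subscription {subscription['id']}: {subscription['name']}")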
def __init__(self, username=None, password=None):
    """
    Takes user input to log in to NASA Earthdata, updates .netrc with the
    user's credentials, and stores an API object.

    Note: Earthdata's EULA applies when accessing ASF APIs.
    The Hyp3 API handles HTTPError and LoginError.
    """
    err = None
    while True:
        if err:
            # Jupyter input handling requires printing the login error here
            # to maintain the correct order of output.
            print(err)
            print("Please try again.\n")
        if not username or not password:
            # Assumed flow for the masked block: prompt for credentials and
            # attempt a login with the asf_hyp3 API client, retrying on LoginError.
            print("Enter your NASA EarthData username:")
            username = input()
            print("Enter your password:")
            password = getpass()
        try:
            api = API(username)
            api.login(password=password)
        except LoginError as e:
            err = e
            username = None
            password = None
            continue
        print("Login successful.")
        print(f"Welcome {username}.")
        self.username = username
        self.password = password
        self.api = api
        break
def mask_sub(sub_id: str, dir: str, model: str, api: API) -> None:
    """
    Masks every product in the given subscription, one page at a time.
    """
    count = 0
    while True:
        products = api.get_products(sub_id=sub_id, page=count, page_size=500)
        if not products:
            break
        print(f"Page: {count + 1}")
        mask_products(products, dir, model)
        count += 1
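# A hedged example of masking a whole subscription with mask_sub(). The model
# argument is whatever mask_products() expects; 'trained_model' below is only a
# placeholder for it, and 'water_masks/' is an illustrative output directory,
# not a value from the original code.
api = hyp3_login()
subscription = grab_subscription(api)
trained_model = None  # placeholder: supply the model object mask_products() needs
mask_sub(subscription['id'], 'water_masks/', trained_model, api)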
def earthdata_hyp3_login():
    """
    Takes user input to log in to NASA Earthdata, updates .netrc with the
    user's credentials, and returns an API object.

    Note: Earthdata's EULA applies when accessing ASF APIs.
    The Hyp3 API handles HTTPError and LoginError.
    """
    err = None
    while True:
        if err:
            # Jupyter input handling requires printing the login error here
            # to maintain the correct order of output.
            print(err)
            print("Please try again.\n")
        # Assumed flow for the masked block: prompt for credentials and
        # attempt a login with the asf_hyp3 API client, retrying on LoginError.
        print("Enter your NASA EarthData username:")
        username = input()
        print("Enter your password:")
        password = getpass()
        try:
            api = API(username)
            api.login(password=password)
        except LoginError as e:
            err = e
            continue
        print("Login successful.")
        print(f"Welcome {username}.")
        filename = "/home/jovyan/.netrc"
        with open(filename, 'w+') as f:
            f.write(
                f"machine urs.earthdata.nasa.gov login {username} password {password}\n"
            )
        return api
def get_subscription_products_info(subscription_id: int, api_object: API) -> list:
    """
    Collects the product records for every page of the given subscription and
    returns them as a single list.
    """
    products = []
    page_count = 0
    while True:
        product_page = api_object.get_products(sub_id=subscription_id,
                                               page=page_count,
                                               page_size=100)
        page_count += 1
        if not product_page:
            break
        for product in product_page:
            products.append(product)
    return products
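# A short sketch of how the paginated helper above might be used (assumed, not
# taken from the original source): collect every product record for a chosen
# subscription and pull out the download URLs. The 'url' field is the same one
# used by download_hyp3_products() further below.
api = earthdata_hyp3_login()
subscription = grab_subscription(api)
products = get_subscription_products_info(int(subscription['id']), api)
urls = [product['url'] for product in products]
print(f"Found {len(urls)} products")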
def get_hyp3_subscriptions(hyp3_api_object: API) -> list:
    """
    Takes a Hyp3 API object and returns the enabled subscriptions associated
    with the account. Prints a notice if there are no enabled subscriptions
    associated with the Hyp3 account.

    precondition: must already be logged in to Hyp3
    """
    assert type(hyp3_api_object) == API, \
        f"Error: get_hyp3_subscriptions was passed a {type(hyp3_api_object)}, not an asf_hyp3.API object"
    subscriptions = hyp3_api_object.get_subscriptions(enabled=True)
    if not subscriptions:
        print("There are no subscriptions associated with this Hyp3 account.")
    return subscriptions
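# A hedged example of the subscription-selection flow used by
# download_hyp3_products() below: list the enabled subscriptions and let the
# user pick one. pick_hyp3_subscription() is the chooser referenced there and
# is assumed to return the chosen subscription id.
api = earthdata_hyp3_login()
subscriptions = get_hyp3_subscriptions(api)
if subscriptions:
    subscription_id = pick_hyp3_subscription(subscriptions)
    print(f"Selected subscription id: {subscription_id}")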
def procS1StackGroupsRTC(outfile=None, infiles=None, path=None, res=None,
                         filter=False, type='dB-byte', scale=[-40, 0],
                         clip=None, shape=None, overlap=False, zipFlag=False,
                         leave=False, thresh=0.4, font=24, hyp=None, keep=None,
                         group=False, aws=None, inamp=False, exclude=False,
                         dates=None, delay=50):
    if outfile is not None:
        logFile = "{}_log.txt".format(outfile)
    else:
        logFile = "run_log.txt"
        outfile = "animation"
    logging.info("***********************************************************************************")
    logging.info(" STARTING RUN {}".format(outfile))
    logging.info("***********************************************************************************")
    printParameters(outfile, infiles, path, res, filter, type, scale, clip,
                    shape, overlap, zipFlag, leave, thresh, font, hyp, keep,
                    group, aws, inamp, exclude, dates, delay)
    if hyp:
        logging.info("Using Hyp3 subscription named {} to download input files".format(hyp))
        username, password = getUsernamePassword()
        api = API(username)
        api.login(password=password)
        download_products(api, sub_name=hyp)
        hyp = None
        zipFlag = True
        path = "hyp3-products"
    if zipFlag:
        unzipFiles(path, "hyp3-products-unzipped")
        zipFlag = False
        path = "hyp3-products-unzipped"
    # Catch the case where no path is specified, no input files are specified,
    # we're not using a HyP3 subscription, we're not using AWS, and no dates
    # file is specified...
    if path is None and hyp is None and infiles is None and dates is None and aws is None:
        path = "hyp3-products-unzipped"
    if group:
        if aws is not None:
            filelist = aws_ls(aws)
            filelist = filter_file_list(filelist, path, '.tif')
            for i in range(len(filelist)):
                filelist[i] = "/vsis3/" + aws + "/" + filelist[i]
        elif (infiles is None or len(infiles) == 0):
            # Make path into an absolute path
            if path is not None:
                if path[0] != "/":
                    root = os.getcwd()
                    path = os.path.join(root, path)
                if not os.path.isdir(path):
                    logging.error("ERROR: path {} is not a directory!".format(path))
                    exit(1)
                logging.info("Data path is {}".format(path))
            else:
                path = os.getcwd()
            if zipFlag:
                filelist = glob.glob("{}/S1*.zip".format(path))
            else:
                filelist = []
                logging.debug("Path is {}".format(path))
                for myfile in os.listdir(path):
                    if os.path.isdir(os.path.join(path, myfile)):
                        filelist.append(myfile)
            if len(filelist) == 0:
                logging.error("ERROR: Unable to find input files")
                exit(1)
        classes, filelists = sortByTime(path, filelist, "rtc")
        logging.debug("aws is {}".format(aws))
        for i in range(len(classes)):
            if len(filelists[i]) > 2:
                if aws is None:
                    time = classes[i]
                    mydir = "DATA_{}".format(classes[i])
                    logging.info("Making clean directory {}".format(mydir))
                    createCleanDir(mydir)
                    for myfile in filelists[i]:
                        newfile = os.path.join(mydir, os.path.basename(myfile))
                        logging.info("Linking file {} to {}".format(
                            os.path.join(path, myfile), newfile))
                        os.symlink(os.path.join(path, myfile), newfile)
                else:
                    mydir = None
                    infiles = filelists[i]
                output = outfile + "_" + classes[i]
                procS1StackRTC(outfile=output, infiles=infiles, path=mydir,
                               res=res, filter=filter, type=type, scale=scale,
                               clip=None, shape=None, overlap=True,
                               zipFlag=zipFlag, leave=leave, thresh=thresh,
                               font=font, keep=keep, aws=aws, inamp=inamp,
                               exclude=exclude, datefile=dates, delay=delay)
                if mydir is not None:
                    shutil.rmtree(mydir)
    else:
        procS1StackRTC(outfile=outfile, infiles=infiles, path=path, res=res,
                       filter=filter, type=type, scale=scale, clip=clip,
                       shape=shape, overlap=overlap, zipFlag=zipFlag,
                       leave=leave, thresh=thresh, font=font, keep=keep,
                       aws=aws, inamp=inamp, exclude=exclude, datefile=dates,
                       delay=delay)
    if not leave and group:
        for myfile in glob.glob("sorted_*"):
            shutil.rmtree(myfile)
    logging.info("***********************************************************************************")
    logging.info(" END OF RUN {}".format(outfile))
    logging.info("***********************************************************************************")
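# A hedged invocation sketch for the RTC group driver above: download the
# products of a HyP3 subscription and build one output stack per time group.
# The output name, subscription name, and resolution are illustrative
# placeholders, not values from the original code.
procS1StackGroupsRTC(outfile="my_rtc_stack",
                     hyp="MyHyP3Subscription",
                     res=30.0,
                     group=True)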
def procS1StackGroupsGIANT(type, output, descFile=None, rxy=None, nvalid=0.8,
                           nsbas=False, filt=0.1, path=None, utcTime=None,
                           heading=None, leave=False, train=False, hyp=None,
                           zipFlag=False, group=False, rawFlag=False, mm=None,
                           errorFlag=False, api_key=None):
    logging.info("***********************************************************************************")
    logging.info(" STARTING RUN {}".format(output))
    logging.info("***********************************************************************************")
    printParameters(type, output, descFile=descFile, rxy=rxy, nvalid=nvalid,
                    nsbas=nsbas, filt=filt, path=path, utcTime=utcTime,
                    heading=heading, leave=leave, train=train, hyp=hyp,
                    zipFlag=zipFlag, group=group, rawFlag=rawFlag, mm=mm,
                    errorFlag=errorFlag, api_key=api_key)
    if hyp:
        logging.info("Using Hyp3 subscription named {} to download input files".format(hyp))
        if api_key is not None:
            config = configparser.ConfigParser()
            config.read(api_key)
            s = 'hyp3-API-credentials'
            api = API(config.get(s, 'username'), api_key=config.get(s, 'api_key'))
        else:
            username, password = getUsernamePassword()
            api = API(username)
            api.login(password=password)
        download_products(api, sub_name=hyp)
        zipFlag = True
        path = "hyp3-products"
    if zipFlag:
        unzipFiles(path, "hyp3-products-unzipped")
        zipFlag = False
        path = "hyp3-products-unzipped"
    if type == 'hyp' and group:
        # Make path into an absolute path
        if path is not None:
            if os.path.isdir(path):
                if path[0] != "/":
                    root = os.getcwd()
                    path = os.path.join(root, path)
            else:
                logging.error("ERROR: path {} is not a directory!".format(path))
                exit(1)
            logging.info("Data path is {}".format(path))
        else:
            path = os.getcwd()
        filelist = []
        for myfile in os.listdir(path):
            if os.path.isdir(os.path.join(path, myfile)):
                filelist.append(myfile)
        if len(filelist) == 0:
            logging.error("ERROR: Unable to find files to process")
            exit(1)
        classes, filelists = sortByTime(path, filelist, "insar")
        for i in range(len(classes)):
            if len(filelists[i]) > 2:
                mydir = "DATA_{}".format(classes[i])
                createCleanDir(mydir)
                for myfile in filelists[i]:
                    thisDir = "../sorted_{}".format(classes[i])
                    inFile = "{}/{}".format(thisDir, os.path.basename(myfile))
                    outFile = "{}/{}".format(mydir, os.path.basename(myfile))
                    logging.debug("Linking file {} to {}".format(inFile, outFile))
                    os.symlink(inFile, outFile)
                outfile = output + "_" + classes[i]
                procS1StackGIANT(type, outfile, descFile=descFile, rxy=rxy,
                                 nvalid=nvalid, nsbas=nsbas, filt=filt,
                                 path=mydir, utcTime=utcTime, heading=heading,
                                 leave=leave, train=train, hyp=hyp,
                                 rawFlag=rawFlag, mm=mm, errorFlag=errorFlag)
                shutil.rmtree(mydir)
    else:
        procS1StackGIANT(type, output, descFile=descFile, rxy=rxy,
                         nvalid=nvalid, nsbas=nsbas, filt=filt, path=path,
                         utcTime=utcTime, heading=heading, leave=leave,
                         train=train, hyp=hyp, rawFlag=rawFlag, mm=mm,
                         errorFlag=errorFlag)
    if not leave:
        if group:
            for myfile in glob.glob("sorted_*"):
                shutil.rmtree(myfile)
    logging.info("***********************************************************************************")
    logging.info(" END OF RUN {}".format(output))
    logging.info("***********************************************************************************")
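# The api_key argument of procS1StackGroupsGIANT() is read with configparser,
# so it should point at an INI-style file with a 'hyp3-API-credentials'
# section containing 'username' and 'api_key' entries. A sketch of writing
# such a file; the filename and values are placeholders.
import configparser

credentials = configparser.ConfigParser()
credentials['hyp3-API-credentials'] = {
    'username': 'my_earthdata_username',
    'api_key': 'my_hyp3_api_key',
}
with open('hyp3_credentials.cfg', 'w') as cfg:
    credentials.write(cfg)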
group = parser.add_mutually_exclusive_group()
group.add_argument("-s", "--sub_name", help="Name of the subscription to download")
group.add_argument("-i", "--id", help="ID of the subscription to download")
group.add_argument("-d", "--date", help="Date of the subscription to download")
args = parser.parse_args()

# Assumed replacement for the masked credential line: read the username interactively.
print("Username: ", end="")
username = input()
print("Username is {}".format(username))
api = API(username)
api.login()
download_products(api, directory="hyp3-products/", id=None, sub_id=args.id,
                  sub_name=args.sub_name, creation_date=args.date,
                  verbose=True, threads=0)
# download_products(api)
def download_hyp3_products(hyp3_api_object: API, destination_path: str,
                           start_date: datetime.date = None,
                           end_date: datetime.date = None,
                           flight_direction: str = None,
                           path: int = None) -> int:
    """
    Takes a Hyp3 API object and a destination path. Calls
    pick_hyp3_subscription() and downloads all products associated with the
    selected subscription. Returns the subscription id.

    preconditions:
    - must already be logged in to Hyp3
    - destination_path must be valid
    """
    assert type(hyp3_api_object) == API, 'Error: hyp3_api_object must be an asf_hyp3.API object.'
    assert type(destination_path) == str, 'Error: destination_path must be a string.'
    assert os.path.exists(destination_path), 'Error: destination_path must be valid.'
    if start_date:
        assert type(start_date) == datetime.date, 'Error: start_date must be a datetime.date'
    if end_date:
        assert type(end_date) == datetime.date, 'Error: end_date must be a datetime.date'
    if flight_direction:
        assert type(flight_direction) == str, 'Error: flight_direction must be a string.'
    if path:
        assert type(path) == int, 'Error: path must be an integer.'
    subscriptions = get_hyp3_subscriptions(hyp3_api_object)
    subscription_id = pick_hyp3_subscription(subscriptions)
    if subscription_id:
        products = []
        page_count = 0
        product_count = 1
        while True:
            product_page = hyp3_api_object.get_products(
                sub_id=subscription_id, page=page_count, page_size=100)
            page_count += 1
            if not product_page:
                break
            for product in product_page:
                products.append(product)
        if date_range_valid(start_date, end_date):
            products = filter_date_range(products, start_date, end_date)
        if flight_direction:
            # Must check validity explicitly: both None and an incorrect
            # flight_direction return False, and the function should not exit
            # when flight_direction is simply None.
            if flight_direction_valid(flight_direction):
                products = product_filter(products, flight_direction=flight_direction)
            else:
                print('Aborting download_hyp3_products() due to invalid flight_direction.')
                sys.exit(1)
        if path:
            products = product_filter(products, path=path)
        if path_exists(destination_path):
            print(f"\n{len(products)} products are associated with the selected date range, "
                  f"flight direction, and path for Subscription ID: {subscription_id}")
            for p in products:
                print(f"\nProduct Number {product_count}:")
                product_count += 1
                url = p['url']
                _match = re.match(
                    r'https://hyp3-download.asf.alaska.edu/asf/data/(.*).zip', url)
                product = _match.group(1)
                filename = f"{destination_path}/{product}"
                # Download and unzip the product only if it is not already present.
                if not os.path.exists(filename):
                    print(f"\n{product} is not present.\nDownloading from {url}")
                    r = requests.get(url, stream=True)
                    download(filename, r)
                    print("\n")
                    os.rename(filename, f"{filename}.zip")
                    filename = f"{filename}.zip"
                    asf_unzip(destination_path, filename)
                    os.remove(filename)
                    print("\nDone.")
                else:
                    print(f"{filename} already exists.")
    return subscription_id
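# A hedged end-to-end example for download_hyp3_products(); the directory,
# dates, path number, and 'ASCENDING' flight-direction string are illustrative
# assumptions, since the accepted values are defined by the helper functions
# the downloader calls.
api = earthdata_hyp3_login()
os.makedirs('rtc_products', exist_ok=True)
sub_id = download_hyp3_products(api,
                                'rtc_products',
                                start_date=datetime.date(2019, 1, 1),
                                end_date=datetime.date(2019, 12, 31),
                                flight_direction='ASCENDING',
                                path=94)
print(f"Downloaded products for subscription {sub_id}")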