def checkengine(self, engine=None):
    """Return a ready-to-use engine bound to this script.

    When no engine is supplied, one is selected interactively with
    default (empty) options and its connection input is collected.
    The script is attached to the engine before it is returned.
    """
    if not engine:
        # No engine given: let the chooser pick one and gather its input.
        engine = choose_engine({})
    engine.get_input()
    engine.script = self
    return engine
def download_public_data(datasets, data_dir='./data/'): """Download public datasets using the EcoData Retriever""" for dataset in datasets: script_list = SCRIPT_LIST() opts = get_opts(script_list, args=['install', dataset, '-e', 's', '-f', 'downloaded_data.sqlite']) script = opts["script"] engine = choose_engine(opts) if isinstance(script, list): for dataset in script: print "=> Installing", dataset.name dataset.download(engine, debug=debug) else: script.download(engine) print "Datasets successfully downloaded."
def download_public_data(datasets, data_dir="./data/"): """Download public datasets using the EcoData Retriever""" from retriever import VERSION, SCRIPT_LIST, ENGINE_LIST from retriever.lib.tools import choose_engine, get_opts for dataset in datasets: script_list = SCRIPT_LIST() opts = get_opts(script_list, args=["install", dataset, "-e", "s", "-f", "downloaded_data.sqlite"]) script = opts["script"] engine = choose_engine(opts) if isinstance(script, list): for dataset in script: print "=> Installing", dataset.name dataset.download(engine, debug=debug) else: script.download(engine) print "Datasets successfully downloaded."
def __init__(self, lists):
    """Start the wx application.

    Registers the GUI's images on an in-memory filesystem, obtains a
    database engine (from a saved default connection or via the
    connection wizard), then shows the main frame.
    """
    wx.App.__init__(self, redirect=False)

    # Expose bundled images through wx's memory filesystem so pages
    # can reference them as memory: URLs without touching disk.
    mfs = wx.MemoryFSHandler()
    wx.FileSystem_AddHandler(mfs)
    mfs.AddFile("globe.png", icon.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("cycle.png", cycle.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("download.png", download.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("downloaded.png", downloaded.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("error.png", error.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("warning.png", warning.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("logo.png", logo.GetImage(), wx.BITMAP_TYPE_PNG)

    default_connection = get_default_connection()
    if default_connection:
        # A default connection was saved earlier: reuse its parameters.
        parameters = get_saved_connection(default_connection)
        parameters["engine"] = default_connection
        engine = choose_engine(parameters)
    else:
        # No saved default: walk the user through the connection wizard.
        wizard = ConnectWizard(lists, ENGINE_LIST)
        success = wizard.RunWizard(wizard.pages[0])
        if not success:
            # User cancelled; abandon startup without showing a frame.
            wizard.Destroy()
            return
        engine = wizard.CONNECTION.engine
        options = wizard.CONNECTION.option
        # Copy the wizard's widget values into a plain dict of options.
        opts = dict()
        for key in options.keys():
            opts[key] = options[key].GetValue()
        engine.opts = opts
        wizard.Destroy()

    try:
        engine.get_connection()
    except:
        # Best effort: open the frame anyway; connection problems can
        # be reported and corrected from within the GUI.
        pass

    self.frame = Frame(
        None, -1, "EcoData Retriever version %s" % VERSION, lists, engine)
    self.frame.Show()
def __init__(self, lists):
    """Initialize the wx application.

    Loads the GUI images into wx's in-memory filesystem, resolves a
    database engine (saved default connection, or interactively via
    the connect wizard), and displays the main application frame.
    """
    wx.App.__init__(self, redirect=False)

    # Make the bundled PNGs addressable as memory: URLs.
    mfs = wx.MemoryFSHandler()
    wx.FileSystem_AddHandler(mfs)
    mfs.AddFile("globe.png", icon.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("cycle.png", cycle.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("download.png", download.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("downloaded.png", downloaded.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("error.png", error.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("warning.png", warning.GetImage(), wx.BITMAP_TYPE_PNG)
    mfs.AddFile("logo.png", logo.GetImage(), wx.BITMAP_TYPE_PNG)

    default_connection = get_default_connection()
    if default_connection:
        # Saved default available: rebuild the engine from stored params.
        parameters = get_saved_connection(default_connection)
        parameters["engine"] = default_connection
        engine = choose_engine(parameters)
    else:
        # Otherwise run the connection wizard interactively.
        wizard = ConnectWizard(lists, ENGINE_LIST)
        success = wizard.RunWizard(wizard.pages[0])
        if not success:
            # Wizard cancelled: clean up and do not start the GUI.
            wizard.Destroy()
            return
        engine = wizard.CONNECTION.engine
        options = wizard.CONNECTION.option
        # Extract current widget values into a plain option dict.
        opts = dict()
        for key in options.keys():
            opts[key] = options[key].GetValue()
        engine.opts = opts
        wizard.Destroy()

    try:
        engine.get_connection()
    except:
        # Ignore connection failures here; the GUI surfaces them later.
        pass

    self.frame = Frame(None, -1,
                       "EcoData Retriever version %s" % VERSION,
                       lists, engine)
    self.frame.Show()
def download_public_data(datasets, data_dir='./data/'): """Download public datasets using the EcoData Retriever""" from retriever import VERSION, SCRIPT_LIST, ENGINE_LIST from retriever.lib.tools import choose_engine, get_opts for dataset in datasets: script_list = SCRIPT_LIST() opts = get_opts(script_list, args=[ 'install', dataset, '-e', 's', '-f', 'downloaded_data.sqlite' ]) script = opts["script"] engine = choose_engine(opts) if isinstance(script, list): for dataset in script: print "=> Installing", dataset.name dataset.download(engine, debug=debug) else: script.download(engine) print "Datasets successfully downloaded."
} } SCRIPT_LIST = SCRIPT_LIST() TEST_ENGINES = {} IGNORE = [ "forest-inventory-analysis", "bioclim", "prism-climate", "vertnet", "NPN", "mammal-super-tree" ] IGNORE = [dataset.lower() for dataset in IGNORE] for engine in ENGINE_LIST: if engine.abbreviation in engine_test: try: opts = engine_test[engine.abbreviation] TEST_ENGINES[engine.abbreviation] = choose_engine(opts) except: TEST_ENGINES[engine.abbreviation] = None pass errors = [] for module in MODULE_LIST: for (key, value) in list(TEST_ENGINES.items()): if module.SCRIPT.shortname.lower() not in IGNORE: if value != None: print("==>", module.__name__, value.name, "..........", module.SCRIPT.shortname) try: module.SCRIPT.download(value) except KeyboardInterrupt: pass
def main():
    """This function launches the EcoData Retriever.

    With no CLI arguments (or `gui`) the wx GUI is started; otherwise
    the arguments are dispatched to the requested subcommand
    (update/citation/gui/new/ls) or to dataset installation.
    """
    if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] == 'gui'):
        # if no command line args are passed, launch GUI
        # NOTE(review): update check is non-graphical on macOS only —
        # presumably to avoid a GUI dialog issue there; confirm.
        check_for_updates(graphical=False if 'darwin' in platform.platform().lower() else True)
        lists = get_lists()
        from retriever.app.main import launch_app
        launch_app(lists)
    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()
        args = parser.parse_args()

        if args.quiet:
            # Suppress all stdout output for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            # Force recompilation of the dataset scripts.
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'update':
            check_for_updates(graphical=False)
            script_list = SCRIPT_LIST()
            return
        elif args.command == 'citation':
            if args.dataset is None:
                # No dataset given: print the retriever's own citation file.
                citation_path = os.path.join(os.path.split(__file__)[0], '../CITATION')
                print citation_path
                with open(citation_path) as citation_file:
                    print citation_file.read()
            else:
                # Print the description of every matching dataset.
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print dataset.description
            return
        elif args.command == 'gui':
            lists = get_lists()
            from retriever.app.main import launch_app
            launch_app(lists)
            return
        elif args.command == 'new':
            # Write a skeleton dataset script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return

        if args.command == 'ls' or args.dataset is None:
            import lscolumns
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print "No scripts are currently available. Updating scripts now..."
                check_for_updates(graphical=False)
                print "\n\nScripts downloaded.\n"
                script_list = SCRIPT_LIST()

            all_scripts = set([script.shortname for script in script_list])
            # Tags can be nested with '>' separators; flatten and
            # upper-case them, always including the synthetic "ALL" group.
            all_tags = set(["ALL"] +
                           [tag.strip().upper()
                            for script in script_list
                            for tagset in script.tags
                            for tag in tagset.split('>')])

            print "Available datasets (%s):" % len(all_scripts)
            lscolumns.printls(sorted(list(all_scripts), key=lambda s: s.lower()))
            print "Groups:"
            lscolumns.printls(sorted(list(all_tags)))
            return

        # Installation path: build the engine from the parsed CLI options.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False

        scripts = name_matches(script_list, args.dataset)
        if scripts:
            for dataset in scripts:
                print "=> Installing", dataset.name
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Ctrl-C skips the current dataset, not the whole run.
                    pass
                except Exception as e:
                    print e
                    if debug:
                        raise
            print "Done!"
        else:
            print "The dataset %s isn't currently available in the Retriever" % (args.dataset)
            print "Run 'retriever -ls to see a list of currently available datasets"
# Smoke test: attempt to download every available dataset module into
# every database engine that can produce a working cursor locally.
from retriever.lib.tools import choose_engine
from retriever import MODULE_LIST, ENGINE_LIST, SCRIPT_LIST

# Materialize the lazily-built module/engine/script registries
# (the imported names are factory callables).
MODULE_LIST = MODULE_LIST()
ENGINE_LIST = ENGINE_LIST()
SCRIPT_LIST = SCRIPT_LIST()

# Maps engine abbreviation -> usable engine instance, or None when the
# engine is unavailable on this machine.
TEST_ENGINES = {}

# Datasets excluded from the smoke test.
IGNORE = ["AvianBodyMass", "FIA"]

for engine in ENGINE_LIST:
    opts = {}
    print "** %s **" % engine.name
    opts["engine"] = engine.abbreviation
    try:
        TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
        # Opening a cursor proves the backend is actually reachable.
        TEST_ENGINES[engine.abbreviation].get_cursor()
    except:
        # Engine not installed/configured here; mark it unusable.
        TEST_ENGINES[engine.abbreviation] = None
        pass

errors = []
for module in MODULE_LIST:
    for (key, value) in TEST_ENGINES.items():
        if value and not module.SCRIPT.shortname in IGNORE:
            print "==>", module.__name__, value.name
            try:
                module.SCRIPT.download(value)
            except KeyboardInterrupt:
                # Ctrl-C skips the current dataset/engine combination.
                pass
def main():
    """This function launches the Data Retriever.

    With no CLI arguments the help text is shown; otherwise the parsed
    arguments are dispatched to a subcommand (defaults/update/citation/
    new/reset/new_json/edit_json/delete_json/ls) or to dataset
    installation via the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])
    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()
        args = parser.parse_args()

        if args.command == "install" and not args.engine:
            # install requires an engine; show the install help instead.
            parser.parse_args(['install','-h'])

        if args.quiet:
            # Silence stdout for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            # Force recompilation of the dataset scripts.
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'defaults':
            # Show each engine's required options and their defaults.
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates()
            script_list = SCRIPT_LIST()
            return
        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                # Print citation details for every matching dataset.
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset: {}".format(dataset.name))
                    print("Citation: {}".format(dataset.citation))
                    print("Description: {}\n".format(dataset.description))
            return
        elif args.command == 'new':
            # Write a skeleton dataset script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return
        elif args.command == 'reset':
            reset_retriever(args.scope)
            return
        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return
        elif args.command == 'edit_json':
            # edit existing JSON script
            for json_file in [filename for filename in
                              os.listdir(os.path.join(HOME_DIR, 'scripts'))
                              if filename[-5:] == '.json']:
                if json_file.lower().find(args.filename.lower()) != -1:
                    edit_json(json_file)
                    return
            raise Exception("File not found")
        elif args.command == 'delete_json':
            # delete existing JSON script
            for json_file in [filename for filename in
                              os.listdir(os.path.join(HOME_DIR, 'scripts'))
                              if filename[-5:] == '.json']:
                if json_file.lower().find(args.dataset.lower()) != -1:
                    confirm = input("Really remove " + json_file +
                                    " and all its contents? (y/N): ")
                    if confirm.lower().strip() in ['y', 'yes']:
                        # raise Exception(json_file)
                        os.remove(os.path.join(HOME_DIR, 'scripts', json_file))
                        try:
                            # Also remove the compiled .py, if it exists.
                            os.remove(os.path.join(
                                HOME_DIR, 'scripts', json_file[:-4] + 'py'))
                        except:
                            # Not compiled yet
                            pass
                    return
            raise Exception("File not found")

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print("No scripts are currently available. Updating scripts now...")
                check_for_updates()
                print("\n\nScripts downloaded.\n")
                script_list = SCRIPT_LIST()

            all_scripts = []
            for script in script_list:
                if script.shortname:
                    if args.l is not None:
                        # Filter mode: every term in args.l must appear in
                        # the name/shortname/tags text to list the script.
                        script_name = script.name + "\nShortname: " + script.shortname + "\n"
                        if script.tags:
                            script_name += "Tags: " + \
                                str([tag for tag in script.tags]) + "\n"
                        not_found = 0
                        for term in args.l:
                            if script_name.lower().find(term.lower()) == -1:
                                not_found = 1
                                break
                        if not_found == 0:
                            all_scripts.append(script_name)
                    else:
                        script_name = script.shortname
                        all_scripts.append(script_name)

            all_scripts = sorted(all_scripts, key=lambda s: s.lower())
            print("Available datasets : {}\n".format(len(all_scripts)))
            if args.l is None:
                from retriever import lscolumns
                lscolumns.printls(sorted(all_scripts, key=lambda s: s.lower()))
            else:
                count = 1
                for script in all_scripts:
                    print("%d. %s" % (count, script))
                    count += 1
            return

        # Installation path: build the engine from the parsed CLI options.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        if hasattr(args, 'debug') and args.not_cached:
            use_cache = False
        else:
            use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug, use_cache=use_cache)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Ctrl-C skips the current dataset, not the whole run.
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".format(
                args.dataset))
            print("Run 'retriever ls to see a list of currently available datasets")
def main():
    """This function launches the Data Retriever.

    With no CLI arguments the help text is shown; otherwise the parsed
    arguments are dispatched to a subcommand (update/citation/new/reset/
    new_json/edit_json/delete_json/ls) or to dataset installation via
    the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])
    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()
        args = parser.parse_args()

        if args.quiet:
            # Silence stdout for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            # Force recompilation of the dataset scripts.
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'update':
            check_for_updates()
            script_list = SCRIPT_LIST()
            return
        elif args.command == 'citation':
            if args.dataset is None:
                # No dataset given: print the retriever's own CITATION file.
                citation_path = os.path.join(os.path.split(__file__)[0], '../CITATION')
                print("\nCitation for retriever:\n")
                with open(citation_path) as citation_file:
                    print(citation_file.read())
            else:
                # Print citation details for every matching dataset.
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset: {}".format(dataset.name))
                    print("Citation: {}".format(dataset.citation))
                    print("Description: {}\n".format(dataset.description))
            return
        elif args.command == 'new':
            # Write a skeleton dataset script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return
        elif args.command == 'reset':
            reset_retriever(args.scope)
            return
        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return
        elif args.command == 'edit_json':
            # edit existing JSON script
            for json_file in [filename for filename in
                              os.listdir(os.path.join(HOME_DIR, 'scripts'))
                              if filename[-5:] == '.json']:
                if json_file.lower().find(args.filename.lower()) != -1:
                    edit_json(json_file)
                    return
            raise Exception("File not found")
        elif args.command == 'delete_json':
            # delete existing JSON script
            for json_file in [filename for filename in
                              os.listdir(os.path.join(HOME_DIR, 'scripts'))
                              if filename[-5:] == '.json']:
                if json_file.lower().find(args.dataset.lower()) != -1:
                    confirm = input("Really remove " + json_file +
                                    " and all its contents? (y/N): ")
                    if confirm.lower().strip() in ['y', 'yes']:
                        # raise Exception(json_file)
                        os.remove(os.path.join(HOME_DIR, 'scripts', json_file))
                        try:
                            # Also remove the compiled .py, if present.
                            os.remove(os.path.join(
                                HOME_DIR, 'scripts', json_file[:-4] + 'py'))
                        except:
                            # Not compiled yet
                            pass
                    return
            raise Exception("File not found")

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print("No scripts are currently available. Updating scripts now...")
                check_for_updates()
                print("\n\nScripts downloaded.\n")
                script_list = SCRIPT_LIST()

            all_scripts = []
            for script in script_list:
                if script.shortname:
                    if args.l is not None:
                        # Filter mode: every term in args.l must appear in
                        # the name/shortname/tags text to list the script.
                        script_name = script.name + "\nShortname: " + script.shortname + "\n"
                        if script.tags:
                            script_name += "Tags: " + \
                                str([tag for tag in script.tags]) + "\n"
                        not_found = 0
                        for term in args.l:
                            if script_name.lower().find(term.lower()) == -1:
                                not_found = 1
                                break
                        if not_found == 0:
                            all_scripts.append(script_name)
                    else:
                        script_name = script.shortname
                        all_scripts.append(script_name)

            all_scripts = sorted(all_scripts, key=lambda s: s.lower())
            print("Available datasets : {}\n".format(len(all_scripts)))
            if args.l is None:
                from retriever import lscolumns
                lscolumns.printls(sorted(all_scripts, key=lambda s: s.lower()))
            else:
                count = 1
                for script in all_scripts:
                    print("%d. %s" % (count, script))
                    count += 1
            return

        # Installation path: build the engine from the parsed CLI options.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Ctrl-C skips the current dataset, not the whole run.
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".format(
                args.dataset))
            print("Run 'retriever ls to see a list of currently available datasets")
def main():
    """This function launches the EcoData Retriever.

    With no CLI arguments (or `gui`) the wx GUI is started; otherwise
    the arguments are dispatched to a subcommand (update/citation/gui/
    new/reset/ls) or to dataset installation via the chosen engine.
    """
    if len(sys.argv) == 1 or (len(sys.argv) > 1 and sys.argv[1] == 'gui'):
        # if no command line args are passed, launch GUI
        # NOTE(review): update check is non-graphical on macOS only —
        # presumably to avoid a GUI dialog issue there; confirm.
        check_for_updates(graphical=False if current_platform == 'darwin' else True)
        lists = get_lists()
        from retriever.app.main import launch_app
        launch_app(lists)
    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()
        args = parser.parse_args()

        if args.quiet:
            # Silence stdout for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            # Force recompilation of the dataset scripts.
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'update':
            check_for_updates(graphical=False)
            script_list = SCRIPT_LIST()
            return
        elif args.command == 'citation':
            if args.dataset is None:
                # No dataset given: print the retriever's own CITATION file.
                citation_path = os.path.join(os.path.split(__file__)[0], '../CITATION')
                print "\nCitation for retriever:\n"
                with open(citation_path) as citation_file:
                    print citation_file.read()
            else:
                # Print citation details for every matching dataset.
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print ("\nCitation: {}".format(dataset.citation))
                    print ("Description: {}\n".format(dataset.description))
            return
        elif args.command == 'gui':
            lists = get_lists()
            from retriever.app.main import launch_app
            launch_app(lists)
            return
        elif args.command == 'new':
            # Write a skeleton dataset script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return
        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        if args.command == 'ls' or args.dataset is None:
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print "No scripts are currently available. Updating scripts now..."
                check_for_updates(graphical=False)
                print "\n\nScripts downloaded.\n"
                script_list = SCRIPT_LIST()

            all_scripts = []
            for script in script_list:
                if script.name:
                    if args.l!=None:
                        # Filter mode: every term in args.l must appear in
                        # the name/shortname/tags text to list the script.
                        script_name = script.name + "\nShortname: " + script.shortname+"\n"
                        if script.tags:
                            script_name += "Tags: "+str([tag for tag in script.tags])+"\n"
                        not_found = 0
                        for term in args.l:
                            if script_name.lower().find(term.lower()) == -1:
                                not_found = 1
                                break
                        if not_found == 0:
                            all_scripts.append(script_name)
                    else:
                        script_name = script.shortname
                        all_scripts.append(script_name)

            all_scripts = sorted(all_scripts, key=lambda s: s.lower())
            print "Available datasets : {}\n".format(len(all_scripts))
            if args.l==None:
                import lscolumns
                lscolumns.printls(sorted(all_scripts, key=lambda s: s.lower()))
            else:
                count = 1
                for script in all_scripts:
                    print ("%d. %s"%(count, script))
                    count += 1
            return

        # Installation path: build the engine from the parsed CLI options.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False

        scripts = name_matches(script_list, args.dataset)
        if scripts:
            for dataset in scripts:
                print "=> Installing", dataset.name
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Ctrl-C skips the current dataset, not the whole run.
                    pass
                except Exception as e:
                    print e
                    if debug:
                        raise
            print "Done!"
        else:
            print "The dataset {} isn't currently available in the Retriever".format(args.dataset)
            print "Run 'retriever ls to see a list of currently available datasets"
def main():
    """This function launches the Data Retriever.

    With no CLI arguments the help text is shown; otherwise the parsed
    arguments are dispatched to a subcommand (defaults/update/citation/
    new/reset/new_json/edit_json/delete_json/ls) or to dataset
    installation via the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])
    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()
        args = parser.parse_args()

        if args.command == "install" and not args.engine:
            # install requires an engine; show the install help instead.
            parser.parse_args(['install','-h'])

        if args.quiet:
            # Silence stdout for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            # Force recompilation of the dataset scripts.
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'defaults':
            # Show each engine's required options and their defaults.
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates()
            script_list = SCRIPT_LIST()
            return
        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                # Print citation details for every matching dataset.
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset: {}".format(dataset.name))
                    print("Citation: {}".format(dataset.citation))
                    print("Description: {}\n".format(dataset.description))
            return
        elif args.command == 'new':
            # Write a skeleton dataset script to the given filename.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()
            return
        elif args.command == 'reset':
            reset_retriever(args.scope)
            return
        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return
        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return
        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print("No scripts are currently available. Updating scripts now...")
                check_for_updates()
                print("\n\nScripts downloaded.\n")
                script_list = SCRIPT_LIST()

            all_scripts = []
            for script in script_list:
                if script.name:
                    if args.l is not None:
                        # Filter mode: every term in args.l must appear in
                        # the title/name/keywords text to list the script.
                        script_name = script.title + "\nName: " + script.name + "\n"
                        if script.keywords:
                            script_name += "Keywords: " + \
                                str([tag for tag in script.keywords]) + "\n"
                        not_found = 0
                        for term in args.l:
                            if script_name.lower().find(term.lower()) == -1:
                                not_found = 1
                                break
                        if not_found == 0:
                            all_scripts.append(script_name)
                    else:
                        script_name = script.name
                        all_scripts.append(script_name)

            all_scripts = sorted(all_scripts, key=lambda s: s.lower())
            print("Available datasets : {}\n".format(len(all_scripts)))
            if args.l is None:
                from retriever import lscolumns
                lscolumns.printls(sorted(all_scripts, key=lambda s: s.lower()))
            else:
                count = 1
                for script in all_scripts:
                    print("%d. %s" % (count, script))
                    count += 1
            return

        # Installation path: build the engine from the parsed CLI options.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        if hasattr(args, 'debug') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    # Ctrl-C skips the current dataset, not the whole run.
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".format(
                args.dataset))
            print("Run 'retriever ls to see a list of currently available datasets")