Exemplo n.º 1
0
import argparse
import os

import argcomplete
from argcomplete.completers import ChoicesCompleter

from retriever.engines import engine_list
from retriever.lib.defaults import VERSION
from retriever.lib.scripts import SCRIPT_LIST

module_list = SCRIPT_LIST()
script_list = []
json_list = []
keywords_list = []
licenses_list = []

for module in module_list:
    script_list.append(module.name)
    if os.path.isfile('.'.join(module._file.split('.')[:-1]) + '.json'):
        json_list.append(module.name)

    if hasattr(module, "keywords"):
        # Add list of keywords to keywords_list
        if module.keywords:
            keywords_list += module.keywords

    if hasattr(module, "licenses"):
        # Append string to list of licenses_list
        if module.licenses:
            for dict_items in module.licenses:
                if dict_items['name']:
Exemplo n.º 2
0
def main():
    """This function launches the Data Retriever.

    Parses the command line, dispatches the informational sub-commands
    (defaults, update, citation, license, new, reset, autocreate, ls,
    commit, log) and otherwise installs the requested dataset(s) with the
    chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        args = parser.parse_args()

        # Download the script collection on first use, unless the command
        # is one that manages the collection itself (reset/update).
        if args.command not in ['reset', 'update'] \
        and not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) \
        and not [f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
            if os.path.exists(SCRIPT_SEARCH_PATHS[-1])]:
            check_for_updates()
            reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Suppress all further console output.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = reload_scripts()

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates()
            reload_scripts()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(
                        args.dataset))
            return

        elif args.command == 'new':
            # Use a context manager so the file handle is closed even if
            # the write fails (previously the handle could leak on error).
            with open(args.filename, 'w') as f:
                f.write(sample_script)

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'autocreate':
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = True if args.f else False
                create_package(args.path, args.dt, file_flag, args.o,
                               args.skip_lines)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not (args.l or args.k or isinstance(args.v, list)):
                # Plain `ls`: combined offline/online listing in columns.
                all_scripts = dataset_names()
                from retriever import lscolumns
                all_scripts_combined = []
                for dataset in all_scripts['offline']:
                    all_scripts_combined.append((dataset, True))
                for dataset in all_scripts['online']:
                    if dataset in all_scripts['offline']:
                        continue
                    all_scripts_combined.append((dataset, False))
                all_scripts_combined = sorted(all_scripts_combined,
                                              key=lambda x: x[0])
                print("Available datasets : {}\n".format(
                    len(all_scripts_combined)))
                lscolumns.printls(all_scripts_combined)
                print("\nThe symbol * denotes the online datasets.")
                print(
                    "To see the full list of available online datasets, visit\n"
                    "https://github.com/weecology/retriever-recipes.")

            elif isinstance(args.v, list):
                # `ls -v [name ...]`: verbose listing, optionally limited
                # to the named datasets.
                online_scripts = []
                if args.v:
                    try:
                        all_scripts = [
                            get_script(dataset) for dataset in args.v
                        ]
                    except KeyError:
                        all_scripts = []
                        print("Dataset(s) is not found.")
                else:
                    scripts = datasets()
                    all_scripts = scripts['offline']
                    online_scripts = scripts['online']
                count = 1
                if not args.v:
                    print("Offline datasets : {}\n".format(len(all_scripts)))
                for script in all_scripts:
                    print("{count}. {title}\n {name}\n"
                          "{keywords}\n{description}\n"
                          "{licenses}\n{citation}\n"
                          "".format(
                              count=count,
                              title=script.title,
                              name=script.name,
                              keywords=script.keywords,
                              description=script.description,
                              licenses=str(script.licenses[0]['name']),
                              citation=script.citation,
                          ))
                    count += 1

                count = 1
                # Online scripts that are not already available offline.
                offline_scripts = [script.name for script in all_scripts]
                set_online_scripts = []
                for script in online_scripts:
                    if script in offline_scripts:
                        continue
                    set_online_scripts.append(script)
                if not args.v:
                    print("Online datasets : {}\n".format(
                        len(set_online_scripts)))
                for script in set_online_scripts:
                    print("{count}. {name}".format(count=count, name=script))
                    count += 1
            else:
                # `ls -l/-k`: filter datasets by license and/or keyword.
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                offline_mesg = "Available offline datasets : {}\n"
                online_mesg = "Available online datasets : {}\n"
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print(offline_mesg.format(len(
                        searched_scripts['offline'])))
                    count = 1
                    for script in searched_scripts['offline']:
                        print("{count}. {title}\n{name}\n"
                              "{keywords}\n{licenses}\n".format(
                                  count=count,
                                  title=script.title,
                                  name=script.name,
                                  keywords=script.keywords,
                                  licenses=str(script.licenses[0]['name']),
                              ))
                        count += 1

                    count = 1
                    searched_scripts_offline = [
                        script.name for script in searched_scripts['offline']
                    ]
                    searched_scripts_online = []
                    for script in searched_scripts['online']:
                        if script in searched_scripts_offline:
                            continue
                        searched_scripts_online.append(script)
                    print(online_mesg.format(len(searched_scripts_online)))
                    for script in searched_scripts_online:
                        print("{count}. {name}".format(count=count,
                                                       name=script))
                        count += 1
            return
        elif args.command == 'commit':
            commit(dataset=args.dataset,
                   path=os.path.normpath(args.path) if args.path else None,
                   commit_message=args.message)
            return
        elif args.command == 'log':
            commit_log(dataset=args.dataset)
            return

        # Fall through: an install/download style command.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            sys.tracebacklimit = 0

        # BUGFIX: previously this guarded on hasattr(args, 'debug') but
        # then read args.not_cached, which could raise AttributeError (or
        # skip cache control) when one attribute exists without the other.
        if hasattr(args, 'not_cached') and args.not_cached:
            use_cache = False
        else:
            use_cache = True
        engine.use_cache = use_cache
        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            # Committed archives (.zip) and provenance hashes take the
            # dedicated install path.
            if args.dataset.endswith('.zip') or args.hash_value:
                _install(vars(args), debug=debug, use_cache=use_cache)
                return
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print(
                "Run 'retriever ls' to see a list of currently available datasets."
            )
Exemplo n.º 3
0
# sys removes the setdefaultencoding method at startup; reload to get it back
# NOTE(review): builtin reload() and sys.setdefaultencoding exist only on
# Python 2; on Python 3 the hasattr guard below makes this a no-op
# (assuming reload itself is in scope, e.g. via importlib) -- confirm.
reload(sys)
if hasattr(sys, 'setdefaultencoding'):
    # set default encoding to latin-1 to decode source text
    sys.setdefaultencoding('latin-1')

# Create the .rst file for the available datasets
datasetfile = open_fw("datasets_list.rst")
# reStructuredText section header written once at the top of the file.
datasetfile_title = """
==================
Datasets Available
==================


"""
script_list = SCRIPT_LIST()

# write the title of dataset rst file
datasetfile.write(datasetfile_title)

# get info from the scripts
for script_num, script in enumerate(script_list, start=1):
    # Prefer the script's explicit reference link; otherwise fall back to
    # the directory portion of its first download URL, else a blank space.
    if script.ref.strip():
        reference_link = script.ref
    elif bool(script.urls.values()):
        reference_link = list(script.urls.values())[0].rpartition('/')[0]
    else:
        reference_link = " "

    # Numbered, bolded dataset heading, e.g. "1. **Some Dataset**".
    title = str(script_num) + ". **{}**\n".format(script.title)
    datasetfile.write(title)
Exemplo n.º 4
0
def install_modified():
    """Installs modified scripts and returns any errors found.

    Runs every modified dataset script against each configured test
    engine inside a throwaway ``test_modified`` working directory and
    returns a list of error tuples for the failures encountered.
    """
    import shutil  # local import: portable temp-directory cleanup below

    # Windows installs use a placeholder DB password; elsewhere empty.
    os_password = ""
    if os.name == "nt":
        os_password = "******"

    # Datasets excluded from this run.
    ignore = [
        "forest-inventory-analysis", "bioclim", "prism-climate", "vertnet",
        "NPN", "mammal-super-tree"
    ]
    ignore_list = [dataset.lower() for dataset in ignore]

    modified_scripts = get_modified_scripts()
    if modified_scripts is None:
        print("No new scripts found. Database is up to date.")
        sys.exit()

    # Work inside a fresh scratch directory so engine output files do not
    # pollute the repository.
    # BUGFIX: the previous implementation shelled out to `rm -r`, which
    # does not exist on Windows even though this function explicitly
    # supports os.name == "nt"; shutil.rmtree is portable, and
    # ignore_errors=True preserves rm's non-fatal behavior.
    if os.path.exists("test_modified"):
        shutil.rmtree("test_modified", ignore_errors=True)
    os.makedirs("test_modified")
    os.chdir("test_modified")
    dbfile = os.path.normpath(
        os.path.join(os.getcwd(), 'testdb_retriever.sqlite'))
    # Connection/output options for every engine exercised by the test.
    engine_test = {
        "postgres": {
            'engine': 'postgres',
            'user': '******',
            'password': os_password,
            'host': 'localhost',
            'port': 5432,
            'database': 'postgres',
            'database_name': 'testschema',
            'table_name': '{db}.{table}'
        },
        "mysql": {
            'engine': 'mysql',
            'user': '******',
            'password': '',
            'host': 'localhost',
            'port': 3306,
            'database_name': 'testdb_retriever',
            'table_name': '{db}.{table}'
        },
        "xml": {
            'engine': 'xml',
            'table_name': 'output_file_{table}.xml'
        },
        "json": {
            'engine': 'json',
            'table_name': 'output_file_{table}.json'
        },
        "csv": {
            'engine': 'csv',
            'table_name': 'output_file_{table}.csv'
        },
        "sqlite": {
            'engine': 'sqlite',
            'file': dbfile,
            'table_name': '{db}_{table}'
        }
    }

    # Instantiate each engine; a failed connection is recorded as None so
    # it is reported as an error later instead of aborting the whole run.
    test_engines = {}
    for engine in engine_list:
        if engine.abbreviation in engine_test:
            try:
                opts = engine_test[engine.abbreviation]
                test_engines[engine.abbreviation] = choose_engine(opts)
            except BaseException:
                test_engines[engine.abbreviation] = None

    module_list = SCRIPT_LIST()
    errors = []
    # Run every modified, non-ignored script against every engine and
    # collect failures rather than stopping at the first one.
    for module in module_list:
        for (key, value) in list(test_engines.items()):
            shortname = module.name.lower()
            if module._name in modified_scripts and shortname not in ignore_list:
                if value is not None:
                    print("==>", module._name, value.name, "..........",
                          module.name)
                    try:
                        module.download(value)
                        module.engine.final_cleanup()
                    except KeyboardInterrupt:
                        pass
                    except Exception as e:
                        print("ERROR.")
                        errors.append((key, module._name, e))
                else:
                    errors.append(
                        (key, "No connection detected......" + module.name))
    os.chdir("..")
    shutil.rmtree("test_modified", ignore_errors=True)
    return errors
Exemplo n.º 5
0
def main():
    """This function launches the Data Retriever.

    Lower-cases the command line, dispatches the informational
    sub-commands (defaults, update, citation, license, new, reset,
    new_json, edit_json, delete_json, ls) and otherwise installs the
    requested dataset(s) with the chosen engine.
    """
    sys.argv[1:] = [arg.lower() for arg in sys.argv[1:]]
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them

        # Download the script collection on first use.
        if not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) and not \
                [f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
                 if os.path.exists(SCRIPT_SEARCH_PATHS[-1])]:
            check_for_updates()
        script_list = SCRIPT_LIST()

        args = parser.parse_args()

        if args.command == "install" and not args.engine:
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Suppress all further console output.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates(False)
            script_list = SCRIPT_LIST()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            dataset_license = license(args.dataset)
            if dataset_license:
                print(dataset_license)
            else:
                print("There is no license information for {}".format(
                    args.dataset))
            return

        elif args.command == 'new':
            # Use a context manager so the file handle is closed even if
            # the write fails (previously the handle could leak on error).
            with open(args.filename, 'w') as f:
                f.write(sample_script)

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return

        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return

        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print(
                    "No scripts are currently available. Updating scripts now..."
                )
                check_for_updates(False)
                print("\n\nScripts downloaded.\n")

            if args.l is None and args.k is None:
                # Plain `ls`: all dataset names in columns.
                all_scripts = dataset_names()
                print("Available datasets : {}\n".format(len(all_scripts)))
                from retriever import lscolumns
                lscolumns.printls(all_scripts)
            else:
                # `ls -l/-k`: filter datasets by license and/or keyword.
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print("Available datasets : {}\n".format(
                        len(searched_scripts)))
                    count = 1
                    for script in searched_scripts:
                        print("{}. {}\n{}\n{}\n{}\n".format(
                            count, script.title, script.name, script.keywords,
                            str(script.licenses[0]['name'])))
                        count += 1
            return

        # Fall through: an install/download style command.
        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            sys.tracebacklimit = 0

        # BUGFIX: previously this guarded on hasattr(args, 'debug') but
        # then read args.not_cached, which could raise AttributeError (or
        # skip cache control) when one attribute exists without the other.
        if hasattr(args, 'not_cached') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".
                  format(args.dataset))
            # BUGFIX: closing quote around 'retriever ls' was missing.
            print(
                "Run 'retriever ls' to see a list of currently available datasets"
            )
Exemplo n.º 6
0
def dataset_licenses():
    """Return set with all available licenses."""
    # Collect the (lower-cased) name of each script's first license
    # directly into a set, deduplicating as we go.
    return {
        str(script.licenses[0]['name']).lower()
        for script in SCRIPT_LIST()
    }
Exemplo n.º 7
0
from __future__ import print_function

import glob
import json
import os
import re
from builtins import input
from time import sleep

from retriever.lib.defaults import HOME_DIR, ENCODING
from retriever.lib.scripts import SCRIPT_LIST

# Lower-cased name of every dataset script returned by SCRIPT_LIST()
# (NOTE(review): presumably used to check new script names against
# existing ones -- confirm against callers).
short_names = [script.name.lower() for script in SCRIPT_LIST()]


def is_empty(val):
    """Check if a variable is an empty string or an empty list."""
    # Equality-based membership test against the two "empty" sentinels
    # this CLI cares about; 0, None and {} are deliberately not empty.
    return val in ("", [])


def clean_input(prompt="", split_char='', ignore_empty=False, dtype=None):
    """Clean the user-input from the CLI before adding it."""
    while True:
        val = input(prompt).strip()
        # split to list type if split_char specified
        if split_char != "":
            val = [v.strip() for v in val.split(split_char) if v.strip() != ""]
        # do not ignore empty input if not allowed
        if not ignore_empty and is_empty(val):
            print("\tError: empty input. Need one or more values.\n")
            continue
Exemplo n.º 8
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of parsed options; must contain 'dataset' and may
            carry 'hash_value', 'force', 'command', engine options, etc.
        use_cache: whether the engine may reuse cached downloads.
        debug: when True, re-raise installation errors after printing.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: when the named dataset matches no script and is not
            a socrata/rdataset identifier.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    # Committed-archive path: install from a provenance zip instead of a
    # script when a .zip path or an explicit hash value is given.
    if args['dataset'].endswith('.zip') or args.get('hash_value'):
        path_to_archive = args['dataset']
        if args.get('hash_value'):
            # Provenance archives are stored as <dataset>-<hash>.zip.
            path_to_archive = os.path.join(
                PROVENANCE_DIR, args['dataset'],
                '{}-{}.zip'.format(args['dataset'], args['hash_value']))
        if not os.path.exists(path_to_archive):
            # NOTE(review): only warns; install_committed is still
            # attempted with the missing path -- confirm intended.
            print('The committed file does not exist.')
        engine = install_committed(path_to_archive,
                                   engine,
                                   force=args.get('force', False))
        return engine
    # Make sure the script collection exists before matching names.
    script_list = SCRIPT_LIST()
    if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
        check_for_updates()
        script_list = SCRIPT_LIST()
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            print("=> Installing", data_sets_script.name)
            try:
                if engine.name == "HDF5":
                    # HDF5 output is produced from an intermediate SQLite
                    # database, so install to SQLite first.
                    sqlite_opts = {
                        'command': 'install',
                        'dataset': data_sets_script,
                        'engine': 'sqlite',
                        'file': (args["file"].split("."))[0] + ".db",
                        'table_name': args["table_name"],
                        'data_dir': args["data_dir"]
                    }
                    sqlite_engine = choose_engine(sqlite_opts)
                    data_sets_script.download(sqlite_engine, debug=debug)
                    data_sets_script.engine.final_cleanup()
                # Reset the table registry so each script starts clean.
                engine.script_table_registry = OrderedDict()
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    elif args['dataset'].startswith('socrata') and not data_sets_scripts:
        # Ad-hoc Socrata dataset: "socrata-<id>" builds a script on the fly.
        socrata_id = args['dataset'].split('-', 1)[1]
        resource = find_socrata_dataset_by_id(socrata_id)

        if "error" in resource.keys():
            if resource["datatype"][0] == "map":
                print("{} because map type datasets are not supported".format(
                    resource["error"]))
            else:
                print("{} because it is of type {} and not tabular".format(
                    resource["error"], resource["datatype"][1]))
        elif len(resource.keys()) == 0:
            return
        else:
            print("=> Installing", args['dataset'])
            name = f"socrata-{socrata_id}"
            create_socrata_dataset(engine, name, resource)
            if args['command'] == 'download':
                return engine
            else:
                script_list = SCRIPT_LIST()
                script = get_script(args['dataset'])
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
    elif args['dataset'].startswith('rdataset') and not data_sets_scripts:
        # Ad-hoc RDataset: "rdataset-<package>-<dataset>".
        print("=> Installing", args['dataset'])
        rdataset = args['dataset'].split('-')
        update_rdataset_catalog()
        package, dataset_name = rdataset[1], rdataset[2]
        create_rdataset(engine, package, dataset_name)
        if args['command'] == 'download':
            return engine
        else:
            script_list = SCRIPT_LIST()
            script = get_script(args['dataset'])
            script.download(engine, debug=debug)
            script.engine.final_cleanup()
    else:
        message = "Run retriever.datasets() to list the currently available " \
                  "datasets."
        raise ValueError(message)
    return engine