Ejemplo n.º 1
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of install options; must contain 'dataset' and
            'hash_value', and for HDF5 installs also 'file', 'table_name'
            and 'data_dir'.
        use_cache: whether the engine may reuse previously downloaded data.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    # A .zip dataset name or an explicit hash means installing a committed
    # (provenance) snapshot rather than a live script.
    if args['dataset'].endswith('.zip') or args['hash_value']:
        path_to_archive = args['dataset']
        if args['hash_value']:
            path_to_archive = os.path.join(
                PROVENANCE_DIR, args['dataset'],
                '{}-{}.zip'.format(args['dataset'], args['hash_value']))
        if not os.path.exists(path_to_archive):
            # Bug fix: previously we printed this message and then tried to
            # install from the missing archive anyway. Bail out early.
            print('The committed file does not exist.')
            return engine
        engine = install_committed(path_to_archive,
                                   engine,
                                   force=args.get('force', False))
        return engine
    script_list = SCRIPT_LIST()
    # Refresh the scripts when neither compiled scripts nor raw script files
    # are present yet.
    if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
        check_for_updates()
        script_list = SCRIPT_LIST()
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if not data_sets_scripts:
        raise ValueError("Run retriever.datasets() to list the currently "
                         "available datasets.")
    for data_sets_script in data_sets_scripts:
        print("=> Installing", data_sets_script.name)
        try:
            if engine.name == "HDF5":
                # HDF5 installs go through an intermediate SQLite database.
                sqlite_opts = {
                    'command': 'install',
                    'dataset': data_sets_script,
                    'engine': 'sqlite',
                    'file': (args["file"].split("."))[0] + ".db",
                    'table_name': args["table_name"],
                    'data_dir': args["data_dir"]
                }
                sqlite_engine = choose_engine(sqlite_opts)
                data_sets_script.download(sqlite_engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            # Start each install with a clean table registry.
            engine.script_table_registry = OrderedDict()
            data_sets_script.download(engine, debug=debug)
            data_sets_script.engine.final_cleanup()
        except Exception as e:
            print(e)
            if debug:
                raise
    return engine
Ejemplo n.º 2
0
def download(dataset, path='./', quiet=False, sub_dir='', debug=False, use_cache=True):
    """Download scripts for retriever.

    Resolves *dataset* against the installed script list and downloads its
    raw data with a download engine, returning the engine used.
    """
    engine = choose_engine({
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'sub_dir': sub_dir,
        'quiet': quiet
    })
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # No compiled scripts or no raw script files yet: fetch them first.
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()

    matched = name_matches(script_list, dataset)
    if not matched:
        raise ValueError("Run retriever.datasets() to see the list of currently "
                         "available datasets.")
    for matched_script in matched:
        print("=> Downloading", matched_script.name)
        try:
            matched_script.download(engine, debug=debug)
            matched_script.engine.final_cleanup()
        except Exception as e:
            print(e)
            if debug:
                raise
    return engine
Ejemplo n.º 3
0
def download(dataset, path='./', quiet=False, subdir=False, debug=False):
    """Download scripts for retriever.

    Args:
        dataset: name of the dataset to download.
        path: destination directory for the downloaded files.
        quiet: passed through to the engine options.
        subdir: whether to keep the dataset's sub-directory structure.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the download.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    args = {
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'subdir': subdir,
        'quiet': quiet
    }
    engine = choose_engine(args)
    script_list = SCRIPT_LIST()
    # Refresh the scripts if none are installed yet.
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()
    scripts = name_matches(script_list, args['dataset'])
    if scripts:
        for script in scripts:
            print("=> Downloading", script.name)
            try:
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        message = "Run retriever.datasets() to see a list of currently " \
                  "available datasets."
        raise ValueError(message)
    # Return the engine for parity with the other download/install helpers.
    return engine
Ejemplo n.º 4
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of install options; must contain 'dataset'.
        use_cache: whether the engine may reuse previously downloaded data.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # Refresh the scripts if none are installed yet.
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST(force_compile=False)
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            print("=> Installing", data_sets_script.name)
            try:
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        # Fixed missing space in the user-facing message:
        # 'datasets()to' -> 'datasets() to'.
        message = "The dataset \"{}\" isn't available in the Retriever. " \
                  "Run retriever.datasets() to list the currently available " \
                  "datasets".format(args['dataset'])
        raise ValueError(message)
    # Return the engine for parity with the other install helpers.
    return engine
Ejemplo n.º 5
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of install options; must contain 'dataset'.
        use_cache: whether the engine may reuse previously downloaded data.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # Refresh the scripts when neither compiled scripts nor raw script files
    # are present yet.
    if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
        check_for_updates()
        script_list = SCRIPT_LIST()
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            try:
                # Start each install with a clean table registry.
                engine.script_table_registry = OrderedDict()
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        # Fixed missing space in the user-facing message:
        # 'datasets()to' -> 'datasets() to'.
        message = "Run retriever.datasets() to list the currently available " \
                  "datasets."
        raise ValueError(message)
    return engine
Ejemplo n.º 6
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of install options; must contain 'dataset'.
        use_cache: whether the engine may reuse previously downloaded data.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # Refresh the scripts if none are installed yet.
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST(force_compile=False)
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            print("=> Installing", data_sets_script.name)
            try:
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        # Fixed missing space in the user-facing message:
        # 'datasets()to' -> 'datasets() to'.
        message = "The dataset \"{}\" isn't available in the Retriever. " \
                  "Run retriever.datasets() to list the currently available " \
                  "datasets".format(args['dataset'])
        raise ValueError(message)
    # Return the engine for parity with the other install helpers.
    return engine
Ejemplo n.º 7
0
def download(dataset, path='./', quiet=False, subdir=False, debug=False):
    """Download scripts for retriever.

    Args:
        dataset: name of the dataset to download.
        path: destination directory for the downloaded files.
        quiet: passed through to the engine options.
        subdir: whether to keep the dataset's sub-directory structure.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the download.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    args = {
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'subdir': subdir,
        'quiet': quiet
    }
    engine = choose_engine(args)

    # Bug fix: script_list was referenced below without ever being assigned,
    # raising NameError. Load (and refresh, if empty) the script list the
    # same way the sibling download/install helpers do.
    script_list = SCRIPT_LIST()
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()
    scripts = name_matches(script_list, args['dataset'])
    if scripts:
        for script in scripts:
            print("=> Downloading", script.name)
            try:
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        message = "The dataset \"{}\" isn't currently available in the Retriever. " \
                  "Run retriever.datasets() to see a list of currently " \
                  "available datasets".format(args['dataset'])
        raise ValueError(message)
    # Return the engine for parity with the other download helpers.
    return engine
Ejemplo n.º 8
0
def download(dataset,
             path='./',
             quiet=False,
             sub_dir='',
             debug=False,
             use_cache=True):
    """Download scripts for retriever.

    Resolves *dataset* against the installed script list and downloads its
    raw data with a download engine.  Names of the form ``socrata-<id>`` or
    ``rdataset-<package>-<name>`` are fetched from those online catalogs
    instead of from a retriever script.

    Args:
        dataset: name of the dataset to download.
        path: destination directory for the downloaded files.
        quiet: passed through to the engine options.
        sub_dir: optional sub-directory to place the data in.
        debug: when True, re-raise download errors instead of printing them.
        use_cache: whether the engine may reuse previously downloaded data.

    Returns:
        The engine used for the download (some Socrata error paths return
        None).

    Raises:
        ValueError: if the dataset matches no script and no online catalog.
    """
    args = {
        'dataset': dataset,
        'command': 'download',
        'path': path,
        'sub_dir': sub_dir,
        'quiet': quiet
    }
    engine = choose_engine(args)
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # Refresh the scripts if none are installed yet.
    if not script_list or not os.listdir(SCRIPT_WRITE_PATH):
        check_for_updates()
        script_list = SCRIPT_LIST()
    scripts = name_matches(script_list, args['dataset'])
    if scripts:
        for script in scripts:
            print("=> Downloading", script.name)
            try:
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    # NOTE(review): the `scripts is None` tests below look unreachable if
    # name_matches returns a list (an empty list is not None) -- confirm
    # against name_matches before relying on these branches.
    elif args['dataset'].startswith('socrata') and (scripts is None):
        socrata_id = args['dataset'].split('-', 1)[1]
        resource = find_socrata_dataset_by_id(socrata_id)

        if "error" in resource.keys():
            # Map-type and non-tabular Socrata datasets are unsupported.
            if resource["datatype"][0] == "map":
                print("{} because map type datasets are not supported".format(
                    resource["error"]))
            else:
                print("{} because it is of type {} and not tabular".format(
                    resource["error"], resource["datatype"][1]))
        elif len(resource.keys()) == 0:
            # Lookup returned nothing; note this path returns None.
            return
        else:
            print("=> Downloading", args['dataset'])
            name = f"socrata-{socrata_id}"
            create_socrata_dataset(engine, name, resource)
    elif (scripts is None) and (args['dataset'].startswith('rdataset')):
        print("=> Downloading", args['dataset'])
        # Expected form: 'rdataset-<package>-<dataset_name>'.
        rdataset = args['dataset'].split('-')
        update_rdataset_catalog()
        package, dataset_name = rdataset[1], rdataset[2]
        create_rdataset(engine, package, dataset_name)
    else:
        message = "Run retriever.datasets() to see the list of currently " \
                  "available datasets."
        raise ValueError(message)
    return engine
Ejemplo n.º 9
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Args:
        args: dict of install options; must contain 'dataset'.
        use_cache: whether the engine may reuse previously downloaded data.
        debug: when True, re-raise download errors instead of printing them.

    Returns:
        The engine used for the installation.

    Raises:
        ValueError: if no script matches the requested dataset.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    script_list = SCRIPT_LIST()
    # Refresh the scripts when neither compiled scripts nor raw script files
    # are present yet.
    if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
        check_for_updates()
        script_list = SCRIPT_LIST()
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            try:
                # Start each install with a clean table registry.
                engine.script_table_registry = OrderedDict()
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    else:
        # Fixed missing space in the user-facing message:
        # 'datasets()to' -> 'datasets() to'.
        message = "Run retriever.datasets() to list the currently available " \
                  "datasets."
        raise ValueError(message)
    return engine
Ejemplo n.º 10
0
 def checkengine(self, engine=None):
     """Return the engine to use, building a default one when none is given.

     The engine is prompted for any missing options and bound to this script
     before being returned.
     """
     if engine is None:
         engine = choose_engine({})
     engine.get_input()
     engine.script = self
     return engine
Ejemplo n.º 11
0
 def checkengine(self, engine=None):
     """Ensure an engine instance exists, prompt it for input, and bind it.

     Falls back to the default engine (chosen from empty options) when the
     caller does not supply one.
     """
     if engine is None:
         default_opts = {}
         engine = choose_engine(default_opts)
     engine.get_input()
     engine.script = self
     return engine
Ejemplo n.º 12
0
def commit_writer(dataset, commit_message, path, quiet):
    """
    Creates the committed zipped file

    Bundles the dataset's script file, its raw data tree, and a generated
    metadata.json (commit info) into ``<name>-<hash prefixes>.zip`` under
    *path*.  The raw data is downloaded first if it is not already cached
    under HOME_DIR/raw_data.
    """
    paths_to_zip = {"script": dataset._file, "raw_data": []}
    raw_dir = os.path.join(HOME_DIR, "raw_data")
    data_exists = False
    if dataset.name not in os.listdir(raw_dir):
        # Raw data is not cached yet: download it with a download engine.
        engine = choose_engine({
            "command": "download",
            "path": "./",
            "sub_dir": ""
        })
        # NOTE(review): `quiet` is passed as the download `debug` flag here
        # -- confirm this mapping is intentional.
        dataset.download(engine=engine, debug=quiet)
        data_exists = True

    elif dataset.name in os.listdir(raw_dir):
        data_exists = True

    if data_exists:
        # Collect every raw data file under the dataset's directory.
        for root, _, files in os.walk(os.path.join(raw_dir, dataset.name)):
            for file in files:
                paths_to_zip["raw_data"].append(os.path.join(root, file))

        info = commit_info_for_commit(dataset, commit_message=commit_message)
        # The zip name embeds the first 3 chars of the data and script hashes.
        zip_file_name = "{}-{}{}.zip".format(dataset.name,
                                             info["md5_dataset"][:3],
                                             info["md5_script"][:3])

        zip_file_path = os.path.join(path, zip_file_name)
        with ZipFile(zip_file_path, "w") as zipped:
            zipped.write(
                paths_to_zip["script"],
                os.path.join("script",
                             os.path.basename(paths_to_zip["script"])),
            )
            for data_file in paths_to_zip["raw_data"]:
                # Store data files relative to the raw_data root.
                zipped.write(data_file, data_file.replace(raw_dir, ""))
            # metadata.json is written to the CWD, added to the archive,
            # then removed.
            with open("metadata.json", "w") as json_file:
                json.dump(info, json_file, sort_keys=True, indent=4)
            zipped.write(os.path.abspath(json_file.name), "metadata.json")
            os.remove("metadata.json")
Ejemplo n.º 13
0
    }
}

# NOTE: rebinds SCRIPT_LIST from a callable to its result; the original
# factory is no longer reachable under this name afterwards.
SCRIPT_LIST = SCRIPT_LIST()
TEST_ENGINES = {}
# Datasets excluded from the engine test sweep.
IGNORE = [
    "forest-inventory-analysis", "bioclim", "prism-climate", "vertnet", "NPN",
    "mammal-super-tree"
]
IGNORE = [dataset.lower() for dataset in IGNORE]

# Build one live engine per configured test engine; a failed setup is
# recorded as None so the loop below can skip it.
for engine in engine_list:
    if engine.abbreviation in engine_test:
        try:
            opts = engine_test[engine.abbreviation]
            TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
        except:  # NOTE(review): bare except hides real setup errors; prefer `except Exception`
            TEST_ENGINES[engine.abbreviation] = None
            pass

errors = []
# Download every module's dataset on every working engine.
for module in MODULE_LIST:
    for (key, value) in list(TEST_ENGINES.items()):
        if module.SCRIPT.name.lower() not in IGNORE:
            if value != None:  # NOTE(review): PEP 8 prefers `is not None`
                print("==>", module.__name__, value.name, "..........",
                      module.SCRIPT.name)
                try:
                    module.SCRIPT.download(value)
                except KeyboardInterrupt:
                    pass
Ejemplo n.º 14
0
def main():
    """This function launches the Data Retriever.

    Parses the command line, handles the informational sub-commands
    (defaults, update, citation, license, new, reset, JSON script editing,
    ls) inline, and otherwise resolves the requested dataset scripts and
    installs them with the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        args = parser.parse_args()

        # First run (or wiped script dir): fetch dataset scripts, unless the
        # user is explicitly resetting or updating.
        # NOTE(review): the `if os.path.exists(...)` inside the listcomp
        # re-tests the directory for every entry rather than filtering the
        # files themselves -- confirm this is intentional.
        if args.command not in ['reset', 'update'] \
        and not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) \
        and not [f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
            if os.path.exists(SCRIPT_SEARCH_PATHS[-1])]:
            check_for_updates()
            reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Silence all further stdout output.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = reload_scripts()

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates()
            reload_scripts()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(
                        args.dataset))
            return

        elif args.command == 'new':
            # Write a template script for the user to edit.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return

        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return

        elif args.command == 'autocreate':
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = True if args.f else False
                create_package(args.path, args.dt, file_flag, args.o,
                               args.skip_lines)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not (args.l or args.k or isinstance(args.v, list)):
                # Plain `ls`: print the dataset names in columns.
                all_scripts = dataset_names()
                print("Available datasets : {}\n".format(len(all_scripts)))
                from retriever import lscolumns

                lscolumns.printls(all_scripts)

            elif isinstance(args.v, list):
                # Verbose listing, optionally restricted to named datasets.
                if args.v:
                    try:
                        all_scripts = [
                            get_script(dataset) for dataset in args.v
                        ]
                    except KeyError:
                        all_scripts = []
                        print("Dataset(s) is not found.")
                else:
                    all_scripts = datasets()
                count = 1
                for script in all_scripts:
                    print("{count}. {title}\n {name}\n"
                          "{keywords}\n{description}\n"
                          "{licenses}\n{citation}\n"
                          "".format(
                              count=count,
                              title=script.title,
                              name=script.name,
                              keywords=script.keywords,
                              description=script.description,
                              licenses=str(script.licenses[0]['name']),
                              citation=script.citation,
                          ))
                    count += 1

            else:
                # Filtered search by license and/or keyword.
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print("Available datasets : {}\n".format(
                        len(searched_scripts)))
                    count = 1
                    for script in searched_scripts:
                        print("{count}. {title}\n{name}\n"
                              "{keywords}\n{licenses}\n".format(
                                  count=count,
                                  title=script.title,
                                  name=script.name,
                                  keywords=script.keywords,
                                  licenses=str(script.licenses[0]['name']),
                              ))
                        count += 1
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Suppress tracebacks for end users when not debugging.
            sys.tracebacklimit = 0

        # NOTE(review): this guards args.not_cached with
        # hasattr(args, 'debug') rather than hasattr(args, 'not_cached') --
        # confirm the attribute always co-exists with 'debug'.
        if hasattr(args, 'debug') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print(
                "Run 'retriever ls' to see a list of currently available datasets."
            )
Ejemplo n.º 15
0
def main():
    """This function launches the Data Retriever.

    Parses the command line, handles the informational sub-commands
    (defaults, update, citation, license, new, reset, JSON script editing,
    ls) inline, and otherwise resolves the requested dataset scripts and
    installs them with the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        args = parser.parse_args()

        # First run (or wiped script dir): fetch dataset scripts, unless the
        # user is explicitly resetting or updating.
        # NOTE(review): the `if os.path.exists(...)` inside the listcomp
        # re-tests the directory for every entry rather than filtering the
        # files themselves -- confirm this is intentional.
        if args.command not in ['reset', 'update'] \
        and not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) \
        and not [f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
            if os.path.exists(SCRIPT_SEARCH_PATHS[-1])]:
                check_for_updates()
                reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Silence all further stdout output.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = reload_scripts()

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates()
            reload_scripts()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(args.dataset))
            return

        elif args.command == 'new':
            # Write a template script for the user to edit.
            f = open(args.filename, 'w')
            f.write(sample_script)
            f.close()

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return

        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return

        elif args.command == 'autocreate':
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = True if args.f else False
                create_package(args.path, args.dt, file_flag, args.o, args.skip_lines)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not (args.l or args.k or isinstance(args.v, list)):
                # Plain `ls`: print the dataset names in columns.
                all_scripts = dataset_names()
                print("Available datasets : {}\n".format(len(all_scripts)))
                from retriever import lscolumns

                lscolumns.printls(all_scripts)

            elif isinstance(args.v, list):
                # Verbose listing, optionally restricted to named datasets.
                if args.v:
                    try:
                        all_scripts = [get_script(dataset) for dataset in args.v]
                    except KeyError:
                        all_scripts = []
                        print("Dataset(s) is not found.")
                else:
                    all_scripts = datasets()
                count = 1
                for script in all_scripts:
                    print(
                        "{count}. {title}\n {name}\n"
                        "{keywords}\n{description}\n"
                        "{licenses}\n{citation}\n"
                        "".format(
                            count=count,
                            title=script.title,
                            name=script.name,
                            keywords=script.keywords,
                            description=script.description,
                            licenses=str(script.licenses[0]['name']),
                            citation=script.citation,
                        )
                    )
                    count += 1

            else:
                # Filtered search by license and/or keyword.
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print("Available datasets : {}\n".format(len(searched_scripts)))
                    count = 1
                    for script in searched_scripts:
                        print(
                            "{count}. {title}\n{name}\n"
                            "{keywords}\n{licenses}\n".format(
                                count=count,
                                title=script.title,
                                name=script.name,
                                keywords=script.keywords,
                                licenses=str(script.licenses[0]['name']),
                            )
                        )
                        count += 1
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Suppress tracebacks for end users when not debugging.
            sys.tracebacklimit = 0

        # NOTE(review): this guards args.not_cached with
        # hasattr(args, 'debug') rather than hasattr(args, 'not_cached') --
        # confirm the attribute always co-exists with 'debug'.
        if hasattr(args, 'debug') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("Run 'retriever ls' to see a list of currently available datasets.")
Ejemplo n.º 16
0
            'table_name': 'output_file_{table}.csv'},

    "sqlite": {'engine': 'sqlite',
               'file': dbfile, 'table_name': '{db}_{table}'}
}

# NOTE: rebinds SCRIPT_LIST from a callable to its result; the original
# factory is no longer reachable under this name afterwards.
SCRIPT_LIST = SCRIPT_LIST()
TEST_ENGINES = {}
# Datasets excluded from the engine test sweep.
IGNORE = ["forest-inventory-analysis", "bioclim", "prism-climate", "vertnet", "NPN", "mammal-super-tree"]
IGNORE = [dataset.lower() for dataset in IGNORE]

# Build one live engine per configured test engine; a failed setup is
# recorded as None so later loops can skip it.
for engine in engine_list:
    if engine.abbreviation in engine_test:
        try:
            opts = engine_test[engine.abbreviation]
            TEST_ENGINES[engine.abbreviation] = choose_engine(opts)
        except:  # NOTE(review): bare except hides real setup errors; prefer `except Exception`
            TEST_ENGINES[engine.abbreviation] = None
            pass

errors = []
for module in MODULE_LIST:
    for (key, value) in list(TEST_ENGINES.items()):
        if module.SCRIPT.name.lower() not in IGNORE:
            if value != None:
                print("==>", module.__name__, value.name, "..........", module.SCRIPT.name)
                try:
                    module.SCRIPT.download(value)
                except KeyboardInterrupt:
                    pass
                except Exception as e:
Ejemplo n.º 17
0
def main():
    """This function launches the Data Retriever.

    Parses the command-line arguments, handles informational commands
    (``defaults``, ``update``, ``citation``, ``license``, ``new``,
    ``reset``, ``autocreate``, ``ls``, ``commit``, ``log``) and,
    for installs, delegates to ``_install`` with the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        args = parser.parse_args()
        reset_or_update = args.command in ["reset", "update"]
        # On first run (no local scripts yet) fetch the script list, except
        # for reset/update which manage the script cache themselves.
        if (not reset_or_update and not os.path.isdir(SCRIPT_SEARCH_PATHS[1])
                and not [
                    f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
                    if os.path.exists(SCRIPT_SEARCH_PATHS[-1])
                ]):
            check_for_updates()
            reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(["install", "-h"])

        if args.quiet:
            # Suppress all further stdout output.
            sys.stdout = open(os.devnull, "w")

        if args.command == "help":
            parser.parse_args(["-h"])

        if hasattr(args, "compile") and args.compile:
            script_list = reload_scripts()

        if args.command == "defaults":
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == "update":
            check_for_updates()
            reload_scripts()
            return

        if args.command == "citation":
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                citations = get_script_citation(args.dataset)
                for citation in citations:
                    print("Citation:   {}".format(citation))
            return

        if args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(
                        args.dataset))
            return

        if args.command == 'new':
            # Write a template script for the user to edit.
            with open(args.filename, 'w') as f:
                f.write(sample_script)
            return

        if args.command == 'reset':
            reset_retriever(args.scope)
            return

        if args.command == 'autocreate':
            if args.c:
                # Check whether any known script already covers this URL.
                url = args.path
                script_list = SCRIPT_LIST()
                flag = 0

                for script in script_list:
                    for dataset in script.tables:
                        if script.tables[dataset].url == url:
                            flag = 1
                            break

                if flag == 1:
                    print("File already exists in dataset " + str(script.name))
                else:
                    print("Dataset is not available, Please download")
                return
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = bool(args.f)
                create_package(args.path, args.dt, file_flag, args.o,
                               args.skip_lines, args.e)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not any([args.l, args.k, args.v, args.s, args.rdataset]):
                # Plain listing: merge offline and online names, offline wins.
                all_scripts = dataset_names()
                from retriever import lscolumns

                all_scripts_combined = []
                for dataset in all_scripts['offline']:
                    all_scripts_combined.append((dataset, True))
                for dataset in all_scripts['online']:
                    if dataset in all_scripts['offline']:
                        continue
                    all_scripts_combined.append((dataset, False))
                all_scripts_combined = sorted(all_scripts_combined,
                                              key=lambda x: x[0])
                print("Available datasets : {}\n".format(
                    len(all_scripts_combined)))
                lscolumns.printls(all_scripts_combined)
                print("\nThe symbol * denotes the online datasets.")
                print(
                    "To see the full list of available online datasets, visit\n"
                    "https://github.com/weecology/retriever-recipes.")

            elif isinstance(args.s, list):
                # Socrata autocomplete search; needs the optional inquirer
                # dependency for the interactive prompt.
                try:
                    theme = INQUIRER_THEME
                except NameError:
                    print("To use retriever ls -s, install inquirer")
                    exit()

                name_list = socrata_autocomplete_search(args.s)
                print("Autocomplete suggestions : Total {} results\n".format(
                    len(name_list)))
                if len(name_list):
                    question = [
                        inquirer.List('dataset name',
                                      message='Select the dataset name',
                                      choices=name_list)
                    ]
                    answer = inquirer.prompt(question,
                                             theme=INQUIRER_THEME,
                                             raise_keyboard_interrupt=True)
                    dataset_name = answer['dataset name']
                    metadata = socrata_dataset_info(dataset_name)

                    print(
                        "Dataset Information of {}: Total {} results\n".format(
                            dataset_name, len(metadata)))

                    for i in range(len(metadata)):
                        print("{}. {}\n \tID : {}\n"
                              "\tType : {}\n"
                              "\tDescription : {}\n"
                              "\tDomain : {}\n \tLink : {}\n".format(
                                  i + 1, metadata[i]["name"],
                                  metadata[i]["id"], metadata[i]["type"],
                                  metadata[i]["description"][:50] + "...",
                                  metadata[i]["domain"], metadata[i]["link"]))

            elif args.rdataset:
                if not isinstance(args.p, list) and not args.all:
                    display_all_rdataset_names()
                elif not isinstance(args.p, list) and args.all:
                    display_all_rdataset_names(package_name='all')
                else:
                    display_all_rdataset_names(package_name=args.p)

            elif isinstance(args.v, list):
                dataset_verbose_list(args.v)

            else:
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                offline_mesg = "Available offline datasets : {}\n"
                online_mesg = "Available online datasets : {}\n"
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print(offline_mesg.format(len(
                        searched_scripts['offline'])))
                    count = 1
                    for script in searched_scripts['offline']:
                        print("{count}. {title}\n{name}\n"
                              "{keywords}\n{licenses}\n".format(
                                  count=count,
                                  title=script.title,
                                  name=script.name,
                                  keywords=script.keywords,
                                  licenses=str(script.licenses[0]['name'])
                                  if script.licenses and len(script.licenses)
                                  else str('N/A'),
                              ))
                        count += 1

                    count = 1
                    searched_scripts_offline = [
                        script.name for script in searched_scripts["offline"]
                    ]
                    searched_scripts_online = []
                    for script in searched_scripts['online']:
                        if script in searched_scripts_offline:
                            continue
                        searched_scripts_online.append(script)
                    print(online_mesg.format(len(searched_scripts_online)))
                    for script in searched_scripts_online:
                        print("{count}. {name}".format(count=count,
                                                       name=script))
                        count += 1
            return
        if args.command == 'commit':
            commit(
                dataset=args.dataset,
                path=os.path.normpath(args.path) if args.path else None,
                commit_message=args.message,
            )
            return
        if args.command == 'log':
            commit_log(dataset=args.dataset)
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        # Check for the attribute we actually read (was 'debug', which would
        # raise AttributeError when 'not_cached' is missing).
        if hasattr(args, 'not_cached') and args.not_cached:
            use_cache = False
        else:
            use_cache = True
        engine.use_cache = use_cache
        if args.dataset is not None:
            # socrata/rdataset names are resolved inside _install, so any
            # truthy value suffices here.
            if args.dataset.startswith(('socrata', 'rdataset')):
                scripts = True
            else:
                scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            _install(vars(args), debug=debug, use_cache=use_cache)
            print("Done!")
        else:
            print(
                "Run 'retriever ls' to see a list of currently available datasets."
            )
Ejemplo n.º 18
0
def install_modified():
    """Installs modified scripts and returns any errors found.

    Creates a throwaway ``test_modified`` working directory, installs every
    modified (non-ignored) script against each reachable test engine, and
    collects failures.

    Returns:
        list: error tuples — ``(engine_key, script_name, exception)`` for
        install failures, or ``(engine_key, message)`` when an engine had
        no connection.
    """
    import shutil  # local import: used only for temp-directory cleanup

    os_password = ""
    if os.name == "nt":
        os_password = "******"

    # Datasets too large/slow to exercise in this run.
    ignore = [
        "forest-inventory-analysis",
        "bioclim",
        "prism-climate",
        "vertnet",
        "NPN",
        "mammal-super-tree"
    ]
    ignore_list = [dataset.lower() for dataset in ignore]

    modified_scripts = get_modified_scripts()
    if modified_scripts is None:
        print("No new scripts found. Database is up to date.")
        sys.exit()

    # Work inside a fresh directory so engine output files don't pollute cwd.
    # shutil.rmtree is portable, unlike shelling out to 'rm -r' (which does
    # not exist on Windows, a platform this function explicitly supports).
    if os.path.exists("test_modified"):
        shutil.rmtree("test_modified")
    os.makedirs("test_modified")
    os.chdir("test_modified")
    dbfile = os.path.normpath(os.path.join(os.getcwd(), 'testdb_retriever.sqlite'))
    engine_test = {
        "postgres": {'engine': 'postgres',
                     'user': '******',
                     'password': os_password,
                     'host': 'localhost',
                     'port': 5432,
                     'database': 'postgres',
                     'database_name': 'testschema',
                     'table_name': '{db}.{table}'},

        "mysql": {'engine': 'mysql',
                  'user': '******',
                  'password': '',
                  'host': 'localhost',
                  'port': 3306,
                  'database_name': 'testdb_retriever',
                  'table_name': '{db}.{table}'},

        "xml": {'engine': 'xml',
                'table_name': 'output_file_{table}.xml'},

        "json": {'engine': 'json',
                 'table_name': 'output_file_{table}.json'},

        "csv": {'engine': 'csv',
                'table_name': 'output_file_{table}.csv'},

        "sqlite": {'engine': 'sqlite',
                   'file': dbfile, 'table_name': '{db}_{table}'}
    }

    test_engines = {}
    for engine in engine_list:
        if engine.abbreviation in engine_test:
            try:
                opts = engine_test[engine.abbreviation]
                test_engines[engine.abbreviation] = choose_engine(opts)
            except BaseException:
                # Engine not reachable (e.g. DB server down); record None so
                # a "no connection" error is reported below.
                test_engines[engine.abbreviation] = None

    module_list = SCRIPT_LIST()
    errors = []
    for module in module_list:
        for (key, value) in list(test_engines.items()):
            shortname = module.name.lower()
            if module._name in modified_scripts and shortname not in ignore_list:
                if value is not None:
                    print("==>", module._name, value.name, "..........", module.name)
                    try:
                        module.download(value)
                        module.engine.final_cleanup()
                    except KeyboardInterrupt:
                        pass
                    except Exception as e:
                        print("ERROR.")
                        errors.append((key, module._name, e))
                else:
                    errors.append((key, "No connection detected......" + module.name))
    os.chdir("..")
    shutil.rmtree("test_modified")
    return errors
Ejemplo n.º 19
0
def main():
    """This function launches the Data Retriever.

    Parses the command-line arguments, handles informational commands
    (``defaults``, ``update``, ``citation``, ``license``, ``new``,
    ``reset``, ``autocreate``, ``ls``, ``commit``, ``log``) and
    otherwise installs the requested dataset(s) with the chosen engine.
    """
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        args = parser.parse_args()

        reset_or_update = args.command in ["reset", "update"]
        # On first run (no local scripts yet) fetch the script list, except
        # for reset/update which manage the script cache themselves.
        if (not reset_or_update and not os.path.isdir(SCRIPT_SEARCH_PATHS[1])
                and not [
                    f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
                    if os.path.exists(SCRIPT_SEARCH_PATHS[-1])
                ]):
            check_for_updates()
            reload_scripts()
        script_list = SCRIPT_LIST()

        if args.command == "install" and not args.engine:
            parser.parse_args(["install", "-h"])

        if args.quiet:
            # Suppress all further stdout output.
            sys.stdout = open(os.devnull, "w")

        if args.command == "help":
            parser.parse_args(["-h"])

        if hasattr(args, "compile") and args.compile:
            script_list = reload_scripts()

        if args.command == "defaults":
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == "update":
            check_for_updates()
            reload_scripts()
            return

        if args.command == "citation":
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                citations = get_script_citation(args.dataset)
                for citation in citations:
                    print("Citation:   {}".format(citation))
            return

        if args.command == 'license':
            if args.dataset is None:
                print(LICENSE)
            else:
                dataset_license = license(args.dataset)
                if dataset_license:
                    print(dataset_license)
                else:
                    print("There is no license information for {}".format(
                        args.dataset))
            return

        if args.command == 'new':
            # Write a template script for the user to edit.
            with open(args.filename, 'w') as f:
                f.write(sample_script)

            return

        if args.command == 'reset':
            reset_retriever(args.scope)
            return

        if args.command == 'autocreate':
            # Exactly one of -f (file) / -d (directory) must be given.
            if sum([args.f, args.d]) == 1:
                file_flag = bool(args.f)
                create_package(args.path, args.dt, file_flag, args.o,
                               args.skip_lines, args.e)
            else:
                print('Please use one and only one of the flags -f -d')
            return

        if args.command == 'ls':
            # scripts should never be empty because check_for_updates is run on SCRIPT_LIST init
            if not (args.l or args.k or isinstance(args.v, list)):
                # Plain listing: merge offline and online names, offline wins.
                all_scripts = dataset_names()
                from retriever import lscolumns

                all_scripts_combined = []
                for dataset in all_scripts['offline']:
                    all_scripts_combined.append((dataset, True))
                for dataset in all_scripts['online']:
                    if dataset in all_scripts['offline']:
                        continue
                    all_scripts_combined.append((dataset, False))
                all_scripts_combined = sorted(all_scripts_combined,
                                              key=lambda x: x[0])
                print("Available datasets : {}\n".format(
                    len(all_scripts_combined)))
                lscolumns.printls(all_scripts_combined)
                print("\nThe symbol * denotes the online datasets.")
                print(
                    "To see the full list of available online datasets, visit\n"
                    "https://github.com/weecology/retriever-recipes.")

            elif isinstance(args.v, list):
                # Verbose listing, optionally restricted to named datasets.
                online_scripts = []
                if args.v:
                    try:
                        all_scripts = [
                            get_script(dataset) for dataset in args.v
                        ]
                    except KeyError:
                        all_scripts = []
                        print("Dataset(s) is not found.")
                else:
                    scripts = datasets()
                    all_scripts = scripts['offline']
                    online_scripts = scripts['online']
                count = 1
                if not args.v:
                    print("Offline datasets : {}\n".format(len(all_scripts)))
                for script in all_scripts:
                    print("{count}. {title}\n {name}\n"
                          "{keywords}\n{description}\n"
                          "{licenses}\n{citation}\n"
                          "".format(
                              count=count,
                              title=script.title,
                              name=script.name,
                              keywords=script.keywords,
                              description=script.description,
                              # Guard against scripts with no license info
                              # (plain indexing raised IndexError).
                              licenses=str(script.licenses[0]['name'])
                              if script.licenses and len(script.licenses)
                              else str('N/A'),
                              citation=script.citation,
                          ))
                    count += 1

                count = 1
                offline_scripts = [script.name for script in all_scripts]
                set_online_scripts = []
                for script in online_scripts:
                    if script in offline_scripts:
                        continue
                    set_online_scripts.append(script)
                if not args.v:
                    print("Online datasets : {}\n".format(
                        len(set_online_scripts)))
                for script in set_online_scripts:
                    print("{count}. {name}".format(count=count, name=script))
                    count += 1
            else:
                param_licenses = args.l if args.l else None
                keywords = args.k if args.k else None

                # search
                searched_scripts = datasets(keywords, param_licenses)
                offline_mesg = "Available offline datasets : {}\n"
                online_mesg = "Available online datasets : {}\n"
                if not searched_scripts:
                    print("No available datasets found")
                else:
                    print(offline_mesg.format(len(
                        searched_scripts['offline'])))
                    count = 1
                    for script in searched_scripts['offline']:
                        print("{count}. {title}\n{name}\n"
                              "{keywords}\n{licenses}\n".format(
                                  count=count,
                                  title=script.title,
                                  name=script.name,
                                  keywords=script.keywords,
                                  # Same N/A guard as the verbose listing.
                                  licenses=str(script.licenses[0]['name'])
                                  if script.licenses and len(script.licenses)
                                  else str('N/A'),
                              ))
                        count += 1

                    count = 1
                    searched_scripts_offline = [
                        script.name for script in searched_scripts["offline"]
                    ]
                    searched_scripts_online = []
                    for script in searched_scripts['online']:
                        if script in searched_scripts_offline:
                            continue
                        searched_scripts_online.append(script)
                    print(online_mesg.format(len(searched_scripts_online)))
                    for script in searched_scripts_online:
                        print("{count}. {name}".format(count=count,
                                                       name=script))
                        count += 1
            return
        if args.command == 'commit':
            commit(
                dataset=args.dataset,
                path=os.path.normpath(args.path) if args.path else None,
                commit_message=args.message,
            )
            return
        if args.command == 'log':
            commit_log(dataset=args.dataset)
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        # Check for the attribute we actually read (was 'debug', which would
        # raise AttributeError when 'not_cached' is missing).
        if hasattr(args, 'not_cached') and args.not_cached:
            use_cache = False
        else:
            use_cache = True
        engine.use_cache = use_cache
        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            # Route committed archives through _install. Test the VALUE of
            # hash_value: hasattr() was always true once argparse defined
            # the option, sending every install down this path.
            if args.dataset.endswith('.zip') or getattr(args, 'hash_value', None):
                _install(vars(args), debug=debug, use_cache=use_cache)
                return
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print(
                "Run 'retriever ls' to see a list of currently available datasets."
            )
Ejemplo n.º 20
0
def install_modified():
    """Installs modified scripts and returns any errors found.

    Creates a throwaway ``test_modified`` working directory, installs every
    modified (non-ignored) script against each reachable test engine, and
    collects failures.

    Returns:
        list: error tuples — ``(engine_key, script_name, exception)`` for
        install failures, or ``(engine_key, message)`` when an engine had
        no connection.
    """
    import shutil  # local import: used only for temp-directory cleanup

    os_password = ""
    if os.name == "nt":
        os_password = "******"

    # Datasets too large/slow to exercise in this run.
    ignore = ["bioclim"]
    ignore_list = [dataset.lower() for dataset in ignore]

    modified_scripts = get_modified_scripts()
    if modified_scripts is None:
        print("No new scripts found. Database is up to date.")
        sys.exit()

    # Work inside a fresh directory so engine output files don't pollute cwd.
    # shutil.rmtree is portable, unlike shelling out to 'rm -r' (which does
    # not exist on Windows, a platform this function explicitly supports).
    if os.path.exists("test_modified"):
        shutil.rmtree("test_modified")
    os.makedirs("test_modified")
    os.chdir("test_modified")
    dbfile = os.path.normpath(
        os.path.join(os.getcwd(), 'testdb_retriever.sqlite'))

    engine_test = {
        "postgres": {
            'engine': 'postgres',
            'user': '******',
            'password': os_password,
            'host': 'localhost',
            'port': 5432,
            'database': 'postgres',
            'database_name': 'testschema',
            'table_name': '{db}.{table}'
        },
        "mysql": {
            'engine': 'mysql',
            'user': '******',
            'password': '',
            'host': 'localhost',
            'port': 3306,
            'database_name': 'testdb_retriever',
            'table_name': '{db}.{table}'
        },
        "xml": {
            'engine': 'xml',
            'table_name': '{db}_{table}.xml'
        },
        "json": {
            'engine': 'json',
            'table_name': '{db}_{table}.json'
        },
        "csv": {
            'engine': 'csv',
            'table_name': '{db}_{table}.csv'
        },
        "sqlite": {
            'engine': 'sqlite',
            'file': dbfile,
            'table_name': '{db}_{table}'
        }
    }

    test_engines = {}
    for engine in engine_list:
        if engine.abbreviation in engine_test:
            try:
                opts = engine_test[engine.abbreviation]
                test_engines[engine.abbreviation] = choose_engine(opts)
            except BaseException:
                # Engine not reachable (e.g. DB server down); record None so
                # a "no connection" error is reported below.
                test_engines[engine.abbreviation] = None

    module_list = SCRIPT_LIST()
    errors = []
    for module in module_list:
        for (key, value) in list(test_engines.items()):
            shortname = module.name.lower()
            if module._name in modified_scripts and shortname not in ignore_list:
                if value is not None:
                    print("==>", module._name, value.name, "..........",
                          module.name)
                    try:
                        module.download(value)
                        module.engine.final_cleanup()
                    except KeyboardInterrupt:
                        pass
                    except Exception as e:
                        print("ERROR.")
                        errors.append((key, module._name, e))
                else:
                    errors.append(
                        (key, "No connection detected......" + module.name))
    os.chdir("..")
    shutil.rmtree("test_modified")
    return errors
Ejemplo n.º 21
0
def main():
    """This function launches the Data Retriever.

    Lower-cases all arguments, parses them, handles informational commands
    (``defaults``, ``update``, ``citation``, ``license``, ``new``,
    ``reset``, ``new_json``, ``edit_json``, ``delete_json``, ``ls``) and
    otherwise installs the requested dataset(s) with the chosen engine.
    """
    sys.argv[1:] = [arg.lower() for arg in sys.argv[1:]]
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them

        # On first run (no local scripts yet) fetch the script list.
        if not os.path.isdir(SCRIPT_SEARCH_PATHS[1]) and not \
                [f for f in os.listdir(SCRIPT_SEARCH_PATHS[-1])
                 if os.path.exists(SCRIPT_SEARCH_PATHS[-1])]:
            check_for_updates()
        script_list = SCRIPT_LIST()

        args = parser.parse_args()

        if args.command == "install" and not args.engine:
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Suppress all further stdout output.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates(False)
            script_list = SCRIPT_LIST()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            dataset_license = license(args.dataset)
            if dataset_license:
                print(dataset_license)
            else:
                print("There is no license information for {}".format(args.dataset))
            return

        elif args.command == 'new':
            # Write a template script for the user to edit.
            with open(args.filename, 'w') as f:
                f.write(sample_script)

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return

        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return

        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print("No scripts are currently available. Updating scripts now...")
                check_for_updates(False)
                print("\n\nScripts downloaded.\n")

            if args.l is None:
                all_scripts = datasets()
                print("Available datasets : {}\n".format(len(all_scripts)))
                from retriever import lscolumns
                lscolumns.printls(dataset_names())
            else:
                # Filter datasets by the requested license.
                all_scripts = datasets(args.l[0])
                print("Available datasets : {}\n".format(len(all_scripts)))
                count = 1
                for script in all_scripts:
                    print("{}. {}".format(count, script.title))
                    print(script.name)
                    print(script.keywords)
                    print()
                    count += 1
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Hide tracebacks from end users when not debugging.
            sys.tracebacklimit = 0

        # Check for the attribute we actually read (was 'debug', which would
        # raise AttributeError when 'not_cached' is missing).
        if hasattr(args, 'not_cached') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".format(
                args.dataset))
            # Fixed message: closing quote around 'retriever ls' was missing.
            print("Run 'retriever ls' to see a list of currently available datasets")
Ejemplo n.º 22
0
def _install(args, use_cache, debug):
    """Install datasets for retriever.

    Parameters
    ----------
    args : dict
        Parsed options. Must contain 'dataset'; depending on the branch
        taken it also reads 'command', 'file', 'table_name', 'data_dir',
        and optionally 'hash_value' and 'force'.
    use_cache : bool
        Whether the engine may reuse previously downloaded raw data.
    debug : bool
        When True, re-raise exceptions after printing them.

    Returns
    -------
    engine
        The engine the dataset(s) were installed into (None on the
        empty-Socrata-resource path, matching the bare ``return``).

    Raises
    ------
    ValueError
        If the requested dataset matches no script and is neither a
        Socrata nor an RDatasets identifier.
    """
    engine = choose_engine(args)
    engine.use_cache = use_cache

    # A '.zip' path or an explicit hash means a committed (provenance)
    # archive is being installed rather than a named script.
    if args['dataset'].endswith('.zip') or args.get('hash_value'):
        path_to_archive = args['dataset']
        if args.get('hash_value'):
            path_to_archive = os.path.join(
                PROVENANCE_DIR, args['dataset'],
                '{}-{}.zip'.format(args['dataset'], args['hash_value']))
        if not os.path.exists(path_to_archive):
            # NOTE(review): install_committed is still attempted below;
            # presumably it reports its own error for a missing archive.
            print('The committed file does not exist.')
        engine = install_committed(path_to_archive,
                                   engine,
                                   force=args.get('force', False))
        return engine
    script_list = SCRIPT_LIST()
    if not (script_list or os.listdir(SCRIPT_WRITE_PATH)):
        # No scripts available locally; fetch them before matching names.
        check_for_updates()
        script_list = SCRIPT_LIST()
    data_sets_scripts = name_matches(script_list, args['dataset'])
    if data_sets_scripts:
        for data_sets_script in data_sets_scripts:
            print("=> Installing", data_sets_script.name)
            try:
                if engine.name == "HDF5":
                    # HDF5 is built from an intermediate SQLite database,
                    # so install into SQLite first.
                    sqlite_opts = {
                        'command': 'install',
                        'dataset': data_sets_script,
                        'engine': 'sqlite',
                        'file': (args["file"].split("."))[0] + ".db",
                        'table_name': args["table_name"],
                        'data_dir': args["data_dir"]
                    }
                    sqlite_engine = choose_engine(sqlite_opts)
                    data_sets_script.download(sqlite_engine, debug=debug)
                    data_sets_script.engine.final_cleanup()
                # Reset the registry so tables registered by a previous
                # script in this loop are not carried over.
                engine.script_table_registry = OrderedDict()
                data_sets_script.download(engine, debug=debug)
                data_sets_script.engine.final_cleanup()
            except Exception as e:
                print(e)
                if debug:
                    raise
    # In the branches below data_sets_scripts is known to be empty (we are
    # in the elif of "if data_sets_scripts"), so the original
    # "and not data_sets_scripts" checks were redundant and are dropped.
    elif args['dataset'].startswith('socrata'):
        # Ad-hoc install of "socrata-<id>" from the Socrata catalog.
        socrata_id = args['dataset'].split('-', 1)[1]
        resource = find_socrata_dataset_by_id(socrata_id)

        if "error" in resource.keys():
            if resource["datatype"][0] == "map":
                print("{} because map type datasets are not supported".format(
                    resource["error"]))
            else:
                print("{} because it is of type {} and not tabular".format(
                    resource["error"], resource["datatype"][1]))
        elif len(resource.keys()) == 0:
            return
        else:
            print("=> Installing", args['dataset'])
            name = f"socrata-{socrata_id}"
            create_socrata_dataset(engine, name, resource)
            if args['command'] == 'download':
                return engine
            else:
                # Refresh the script cache so get_script can find the
                # script that create_socrata_dataset just wrote.
                script_list = SCRIPT_LIST()
                script = get_script(args['dataset'])
                script.download(engine, debug=debug)
                script.engine.final_cleanup()
    elif args['dataset'].startswith('rdataset'):
        # "rdataset-<package>-<name>" installs from the RDatasets catalog.
        print("=> Installing", args['dataset'])
        rdataset = args['dataset'].split('-')
        update_rdataset_catalog()
        package, dataset_name = rdataset[1], rdataset[2]
        create_rdataset(engine, package, dataset_name)
        if args['command'] == 'download':
            return engine
        else:
            # Refresh the script cache so get_script can find the
            # script that create_rdataset just wrote.
            script_list = SCRIPT_LIST()
            script = get_script(args['dataset'])
            script.download(engine, debug=debug)
            script.engine.final_cleanup()
    else:
        message = "Run retriever.datasets() to list the currently available " \
                  "datasets."
        raise ValueError(message)
    return engine
Ejemplo n.º 23
0
def main():
    """Launch the Data Retriever command-line interface.

    Parses ``sys.argv``, dispatches the informational commands
    ('defaults', 'update', 'citation', 'license', 'new', 'reset',
    'new_json', 'edit_json', 'delete_json', 'ls') directly, and for the
    remaining commands builds an engine and installs the matching
    dataset scripts.
    """
    # Commands and dataset names are matched case-insensitively.
    sys.argv[1:] = [arg.lower() for arg in sys.argv[1:]]
    if len(sys.argv) == 1:
        # if no command line args are passed, show the help options
        parser.parse_args(['-h'])

    else:
        # otherwise, parse them
        script_list = SCRIPT_LIST()

        args = parser.parse_args()

        if args.command == "install" and not args.engine:
            # 'install' requires an engine subcommand; show its help.
            parser.parse_args(['install', '-h'])

        if args.quiet:
            # Silence all subsequent output for the rest of the run.
            sys.stdout = open(os.devnull, 'w')

        if args.command == 'help':
            parser.parse_args(['-h'])

        if hasattr(args, 'compile') and args.compile:
            script_list = SCRIPT_LIST(force_compile=True)

        if args.command == 'defaults':
            for engine_item in engine_list:
                print("Default options for engine ", engine_item.name)
                for default_opts in engine_item.required_opts:
                    print(default_opts[0], " ", default_opts[2])
                print()
            return

        if args.command == 'update':
            check_for_updates(False)
            script_list = SCRIPT_LIST()
            return

        elif args.command == 'citation':
            if args.dataset is None:
                print("\nCitation for retriever:\n")
                print(CITATION)
            else:
                scripts = name_matches(script_list, args.dataset)
                for dataset in scripts:
                    print("\nDataset:  {}".format(dataset.name))
                    print("Citation:   {}".format(dataset.citation))
                    print("Description:   {}\n".format(dataset.description))

            return

        elif args.command == 'license':
            dataset_license = license(args.dataset)
            if dataset_license:
                print(dataset_license)
            else:
                print("There is no license information for {}".format(args.dataset))
            return

        elif args.command == 'new':
            # Use a context manager so the file handle is closed even if
            # the write fails (original open/write/close leaked on error).
            with open(args.filename, 'w') as f:
                f.write(sample_script)

            return

        elif args.command == 'reset':
            reset_retriever(args.scope)
            return

        elif args.command == 'new_json':
            # create new JSON script
            create_json()
            return

        elif args.command == 'edit_json':
            # edit existing JSON script
            json_file = get_script_filename(args.dataset.lower())
            edit_json(json_file)
            return

        elif args.command == 'delete_json':
            # delete existing JSON script from home directory and or script directory if exists in current dir
            confirm = input("Really remove " + args.dataset.lower() +
                            " and all its contents? (y/N): ")
            if confirm.lower().strip() in ['y', 'yes']:
                json_file = get_script_filename(args.dataset.lower())
                delete_json(json_file)
            return

        if args.command == 'ls':
            # If scripts have never been downloaded there is nothing to list
            if not script_list:
                print("No scripts are currently available. Updating scripts now...")
                check_for_updates(False)
                print("\n\nScripts downloaded.\n")

            if args.l is None:
                all_scripts = datasets()
                print("Available datasets : {}\n".format(len(all_scripts)))
                from retriever import lscolumns
                lscolumns.printls(dataset_names())
            else:
                all_scripts = datasets(args.l[0])
                print("Available datasets : {}\n".format(len(all_scripts)))
                count = 1
                for script in all_scripts:
                    print("{}. {}".format(count, script.title))
                    print(script.name)
                    print(script.keywords)
                    print()
                    count += 1
            return

        engine = choose_engine(args.__dict__)

        if hasattr(args, 'debug') and args.debug:
            debug = True
        else:
            debug = False
            # Suppress tracebacks for expected user-facing errors.
            sys.tracebacklimit = 0

        # BUGFIX: the original guarded with hasattr(args, 'debug') but then
        # read args.not_cached, raising AttributeError when 'not_cached'
        # was absent. Guard the attribute that is actually accessed.
        if hasattr(args, 'not_cached') and args.not_cached:
            engine.use_cache = False
        else:
            engine.use_cache = True

        if args.dataset is not None:
            scripts = name_matches(script_list, args.dataset)
        else:
            raise Exception("no dataset specified.")
        if scripts:
            for dataset in scripts:
                print("=> Installing", dataset.name)
                try:
                    dataset.download(engine, debug=debug)
                    dataset.engine.final_cleanup()
                except KeyboardInterrupt:
                    pass
                except Exception as e:
                    print(e)
                    if debug:
                        raise
            print("Done!")
        else:
            print("The dataset {} isn't currently available in the Retriever".format(
                args.dataset))
            # BUGFIX: closed the previously unbalanced quote around the command.
            print("Run 'retriever ls' to see a list of currently available datasets")