Example #1
def retrain_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer retrain

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # parses the command line to get the context args and the log files to use
    command_args, command, api, session_file, resume = get_context(
        args, SETTINGS)

    # --id or --model-tag, --ensemble-tag, etc. is compulsory
    if check_compulsory_options(command.flags, command_args):
        retrain_model(command_args,
                      api,
                      command.common_options,
                      session_file=session_file)
        u.log_message("_" * 80 + "\n", log_file=session_file)
    else:
        sys.exit("You must provide the ID of the resource to be"
                 " retrained in the --id option or a unique tag"
                 " to retrieve such ID."
                 " Type bigmler retrain --help\n"
                 " to see all the available options.")
Example #2
def connector_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command_args, _, api, session_file, _ = get_context(args, SETTINGS)

    path = u.check_dir(command_args.output)
    log = None
    if command_args.log_file:
        u.check_dir(command_args.log_file)
        log = command_args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])
    if not command_args.external_connector_id and \
            u.has_connection_info(command_args):
        # create connector
        pec.connector_processing(api,
                                 command_args,
                                 command_args.resume,
                                 session_file=session_file,
                                 path=path,
                                 log=log)
    if command_args.external_connector_id and (
            command_args.connector_attributes or command_args.name
            or command_args.tag or command_args.description
            or command_args.category):
        # update connector's attributes
        pec.update_external_connector(command_args, api, command_args.resume, \
            session_file=session_file)

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(command_args.output_dir,
                            log_file=session_file,
                            verbosity=command_args.verbosity)
Example #3
def reify_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, _, api, session_file, _ = get_context(args, SETTINGS)

    def logger(message):
        """Partial to log messages according to args.verbosity

        """
        u.log_message(u.dated(message), \
            log_file=session_file, console=command_args.verbosity)

    message = "Starting reification for %s\n\n" % command_args.resource_id
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    reify_resources(command_args, api)
    message = "\nReification complete. See the results in %s\n\n" % \
        command_args.output
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir,
                            log_file=session_file,
                            verbosity=command_args.verbosity)
Example #4
def project_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    path = u.check_dir(command_args.output)
    log = None
    if command_args.log_file:
        u.check_dir(command_args.log_file)
        log = command_args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])
    if not command_args.project_id and command_args.name:
        command_args.project = command_args.name
    if command_args.project:
        # create project
        pp.project_processing(
            api, command_args, command_args.resume, session_file=session_file,
            path=path, log=log, create=True)
    if command_args.project_id and (
            command_args.project_attributes or
            command_args.name or command_args.tag or command_args.description
            or command_args.category):
        # update project's attributes
        pp.update_project(command_args, api, command_args.resume, \
            session_file=session_file)

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
Example #5
def reify_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    def logger(message):
        """Partial to log messages according to args.verbosity

        """
        u.log_message(u.dated(message), \
            log_file=session_file, console=command_args.verbosity)

    print command_args.output, command_args.output_dir
    message = "Starting reification for %s\n\n" % command_args.resource_id
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    reify_resources(command_args, api, logger)
    message = "\nReification complete. See the results in %s\n\n" % \
        command_args.output
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
Example #6
def delete_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    if command_args.resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    delete_resources(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #7
def delete_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    if command_args.resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    delete_resources(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #8
def project_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    path = u.check_dir(command_args.output)
    log = None
    if command_args.log_file:
        u.check_dir(command_args.log_file)
        log = command_args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])
    if not command_args.project_id and command_args.name:
        command_args.project = command_args.name
    if command_args.project:
        # create project
        pp.project_processing(
            api, command_args, command_args.resume, session_file=session_file,
            path=path, log=log, create=True)
    if command_args.project_id and (
            command_args.project_attributes or
            command_args.name or command_args.tag or command_args.description
            or command_args.category):
        # update project's attributes
        pp.update_project(command_args, api, command_args.resume, \
            session_file=session_file)

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
Example #9
def cluster_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        # Keep the debug option if set
        debug = command_args.debug
        # Restore the args of the call to resume from the command log file
        stored_command = StoredCommand(args, COMMAND_LOG, DIRS_LOG)
        command = Command(None, stored_command=stored_command)
        # Logs the issued command and the resumed command
        session_file = os.path.join(stored_command.output_dir, SESSIONS_LOG)
        stored_command.log_command(session_file=session_file)
        # Parses resumed arguments.
        command_args = a.parse_and_check(command)
        if command_args.predictions is None:
            command_args.predictions = os.path.join(stored_command.output_dir, DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir, DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir, command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, "r")
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open(os.path.join(directory, DEFAULTS_FILE), "w", 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory), log_file=DIRS_LOG)

    # Creates the corresponding api instance
    if resume and debug:
        command_args.debug = True
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    # Selects the action to perform
    if has_train(command_args) or has_test(command_args) or command_args.cluster_datasets is not None:
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api, command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #10
def cluster_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
        if command_args.predictions is None:
            command_args.predictions = os.path.join(output_dir, DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open(os.path.join(directory, DEFAULTS_FILE), 'w',
                                 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.cluster_datasets is not None):
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api,
                         command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #11
def anomaly_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
        if command_args.predictions is None:
            command_args.predictions = os.path.join(output_dir,
                                                    DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open(os.path.join(directory, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args) or
        command_args.score or
        a.has_anomaly(command_args)):
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api,
                         command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #12
def logistic_regression_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    default_output = ('evaluation'
                      if command_args.evaluate else 'predictions.csv')
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
        default_output = ('evaluation'
                          if command_args.evaluate else 'predictions.csv')
        if command_args.predictions is None:
            command_args.predictions = os.path.join(output_dir, default_output)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    default_output)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api,
                         command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #13
def whizzml_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer whizzml

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        command_args, session_file, _ = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear-logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
    # Creates the corresponding api instance from the parsed args
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    a.transform_dataset_options(command_args, api)

    # package_dir
    if command_args.package_dir is not None:
        create_package(command_args,
                       api,
                       command.common_options,
                       resume=resume)
    else:
        sys.exit("You must use the --package-dir flag pointing to the"
                 " directory where the metadata.json file is. Type\n"
                 "    bigmler whizzml --help\n"
                 " to see all the available options.")
Example #14
def logistic_regression_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    default_output = ('evaluation' if command_args.evaluate
                      else 'predictions.csv')
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
        default_output = ('evaluation' if command_args.evaluate
                          else 'predictions.csv')
        if command_args.predictions is None:
            command_args.predictions = os.path.join(output_dir,
                                                    default_output)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.predictions is None:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    default_output)
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = os.path.join(command_args.output_dir,
                                                    command_args.predictions)
        directory = u.check_dir(command_args.predictions)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        output_args = a.get_output_args(api, command_args, resume)
        a.transform_args(command_args, command.flags, api,
                         command.user_defaults)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #15
def whizzml_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer whizzml

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir,
                                    SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear-logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
    # Creates the corresponding api instance from the parsed args
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    a.transform_dataset_options(command_args, api)


    # package_dir
    if command_args.package_dir is not None:
        create_package(command_args, api, command.common_options,
                       resume=resume)
    else:
        sys.exit("You must use the --package-dir flag pointing to the"
                 " directory where the metadata.json file is. Type\n"
                 "    bigmler whizzml --help\n"
                 " to see all the available options.")
Example #16
def delete_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        # Keep the debug option if set
        debug = command_args.debug
        # Restore the args of the call to resume from the command log file
        stored_command = StoredCommand(args, COMMAND_LOG, DIRS_LOG)
        command = Command(None, stored_command=stored_command)
        # Logs the issued command and the resumed command
        session_file = os.path.join(stored_command.output_dir, SESSIONS_LOG)
        stored_command.log_command(session_file=session_file)
        # Parses resumed arguments.
        command_args = a.parse_and_check(command)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open(os.path.join(directory, DEFAULTS_FILE), 'w',
                                 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # Creates the corresponding api instance
    if resume and debug:
        command_args.debug = True
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    delete_resources(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #17
def delete_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        # Keep the debug option if set
        debug = command_args.debug
        # Restore the args of the call to resume from the command log file
        stored_command = StoredCommand(args, COMMAND_LOG, DIRS_LOG)
        command = Command(None, stored_command=stored_command)
        # Logs the issued command and the resumed command
        session_file = os.path.join(stored_command.output_dir, SESSIONS_LOG)
        stored_command.log_command(session_file=session_file)
        # Parses resumed arguments.
        command_args = a.parse_and_check(command)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open(os.path.join(directory, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # Creates the corresponding api instance
    if resume and debug:
        command_args.debug = True
    api = a.get_api_instance(command_args, u.check_dir(session_file))

    delete_resources(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #18
def execute_whizzml(args, api, session_file):
    """executes the code in a script or a source code file

    """

    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])
    path = args.output_dir
    if args.to_library:
        library = pw.library_processing(api,
                                        args,
                                        session_file=session_file,
                                        path=path,
                                        log=log)
    else:
        if args.script_file:
            # script is retrieved from the contents of the given local JSON file
            script, _, _ = u.read_local_resource(args.script_file)
            args.script = script['resource']
            args.script_ids = [args.script]
        elif args.code_file or args.code:
            script, scripts = pw.script_processing(api,
                                                   args,
                                                   session_file=session_file,
                                                   path=path,
                                                   log=log)
            args.script = script['resource']
            args.script_ids = scripts

        if (args.script or args.scripts) and not args.no_execute:
            execution = pw.execution_processing(api,
                                                args,
                                                session_file=session_file,
                                                path=path,
                                                log=log)
            execution = r.get_execution( \
                execution, api, args.verbosity, session_file)
            r.save_txt_and_json(execution['object']['execution'],
                                args.output,
                                api=api)
            args.execution = execution['resource']

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(args.output_dir,
                            log_file=session_file,
                            verbosity=args.verbosity)
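
For testing execute_whizzml in isolation, the args object only needs the attributes the function reads above. A rough sketch with placeholder values follows; the attribute names are taken from the function body, while the values and the commented-out call are assumptions.

import argparse

# Illustrative namespace mirroring the attributes execute_whizzml accesses.
# In a real run these values come from the BigMLer argument parser.
args = argparse.Namespace(
    log_file=None,            # no log file, so clear_log_files is skipped
    output_dir="whizzml_run",
    to_library=False,
    script_file=None,
    code_file=None,
    code="(+ 1 1)",           # inline WhizzML source, placeholder
    script=None, script_ids=None, scripts=None,
    no_execute=False,
    output="whizzml_run/whizzml_results",
    verbosity=1)
# execute_whizzml(args, api, session_file)  # api and session_file as built by a dispatcher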
Example #19
def execute_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, _, api, session_file, _ = get_context(args, SETTINGS)

    # process the command
    execute_whizzml(command_args, api, session_file)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #20
def execute_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    # process the command
    execute_whizzml(command_args, api, session_file)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #21
def execute_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)
    default_output = 'whizzml_results'
    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
        if command_args.output is None:
            command_args.output = os.path.join(output_dir, default_output)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               default_output)
        if len(os.path.dirname(command_args.output).strip()) == 0:
            command_args.output = os.path.join(command_args.output_dir,
                                               command_args.output)
        directory = u.check_dir(command_args.output)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    _ = a.get_output_args(api, command_args, resume)
    a.transform_args(command_args, command.flags, api, command.user_defaults)
    execute_whizzml(command_args, api, session_file)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #22
def association_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, _, api, session_file, resume = get_context(args, SETTINGS)

    # Selects the action to perform
    if a.has_train(command_args) or a.has_test(command_args):
        output_args = a.get_output_args(api, command_args, resume)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #23
def execute_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)
    default_output = 'whizzml_results'
    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    resume = command_args.resume
    if command_args.resume:
        command_args, session_file, output_dir = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
        if command_args.output is None:
            command_args.output = os.path.join(output_dir,
                                               default_output)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               default_output)
        if len(os.path.dirname(command_args.output).strip()) == 0:
            command_args.output = os.path.join(command_args.output_dir,
                                               command_args.output)
        directory = u.check_dir(command_args.output)
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    _ = a.get_output_args(api, command_args, resume)
    a.transform_args(command_args, command.flags, api,
                     command.user_defaults)
    execute_whizzml(command_args, api, session_file)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #24
def association_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    # Selects the action to perform
    if a.has_train(command_args) or a.has_test(command_args):
        output_args = a.get_output_args(api, command_args, resume)
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #25
def pca_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)

    command_args, _, api, session_file, _ = get_context(args, settings)

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #26
def fusion_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)
    if '--evaluate' in args:
        settings.update({"default_output": "evaluation"})

    command_args, _, api, session_file, _ = get_context(args, settings)

    # Selects the action to perform
    if a.has_value(command_args, "fusion_models_") or a.has_test(command_args):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #27
def whizzml_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer whizzml

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, _, resume = get_context(args, SETTINGS)

    # package_dir
    if command_args.package_dir is not None:
        command_args.package_dir = os.path.expanduser(command_args.package_dir)
        create_package(command_args, api, command, resume=resume)
    else:
        sys.exit("You must use the --package-dir flag pointing to the"
                 " directory where the metadata.json file is. Type\n"
                 "    bigmler whizzml --help\n"
                 " to see all the available options.")
Example #28
def pca_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   settings)

    # Selects the action to perform
    if (a.has_train(command_args) or a.has_test(command_args)
            or command_args.export_fields):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #29
def execute_whizzml(args, api, session_file):
    """executes the code in a script or a source code file

    """

    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])
    path = args.output_dir
    if args.to_library:
        pw.library_processing( \
            api, args, session_file=session_file, path=path, log=log)
    else:
        if args.script_file:
            # script is retrieved from the contents of the given local file
            script, _, _ = u.read_local_resource(args.script_file)
            args.script = script['resource']
            args.script_ids = [args.script]
        elif args.code_file or args.code:
            script, scripts = pw.script_processing( \
                api, args, session_file=session_file, path=path, log=log)
            args.script = script if isinstance(script, basestring) else \
                script.get('resource')
            args.script_ids = scripts

        if (args.script or args.scripts) and not args.no_execute:
            execution = pw.execution_processing( \
                api, args, session_file=session_file, path=path, log=log)
            execution = r.get_execution( \
                execution, api, args.verbosity, session_file)
            r.save_txt_and_json(execution['object']['execution'],
                                args.output, api=api)
            args.execution = execution['resource']

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(args.output_dir, log_file=session_file,
                            verbosity=args.verbosity)
Example #30
def fusion_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    settings = {}
    settings.update(SETTINGS)
    if '--evaluate' in args:
        settings.update({"default_output": "evaluation"})

    command_args, command, api, session_file, resume = get_context(args,
                                                                   settings)

    # Selects the action to perform
    if a.has_value(command_args, "fusion_models_") or a.has_test(command_args):
        compute_output(api, command_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #31
def whizzml_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer whizzml

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    # package_dir
    if command_args.package_dir is not None:
        command_args.package_dir = os.path.expanduser(command_args.package_dir)
        create_package(command_args, api, command.common_options,
                       resume=resume)
    else:
        sys.exit("You must use the --package-dir flag pointing to the"
                 " directory where the metadata.json file is. Type\n"
                 "    bigmler whizzml --help\n"
                 " to see all the available options.")
Example #32
def export_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different export functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)
    # Selects the model or ensemble to generate local code for
    resource = command_args.ensemble or command_args.model
    message = "Generating %s code for %s\n\n" % (command_args.language,
                                                 resource)
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    export_code(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
Example #33
def export_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different export functions

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command_args, _, api, session_file, _ = get_context(args, SETTINGS)
    # Selects the model or ensemble to generate local code for
    resource = command_args.ensemble or command_args.model
    message = "Generating %s code for %s\n\n" % (command_args.language,
                                                 resource)
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    export_code(command_args, api)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir,
                            log_file=session_file,
                            verbosity=command_args.verbosity)
Example #34
def retrain_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer retrain

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    # parses the command line to get the context args and the log files to use
    command_args, command, api, session_file, resume = get_context(args,
                                                                   SETTINGS)

    # --id or --model-tag, --ensemble-tag, etc. is compulsory
    if check_compulsory_options(command.flags, command_args):
        retrain_model(command_args, api, command.common_options,
                      session_file=session_file)
        u.log_message("_" * 80 + "\n", log_file=session_file)
    else:
        sys.exit("You must provide the ID of the resource to be"
                 " retrained in the --id option or a unique tag"
                 " to retrieve such ID."
                 " Type bigmler retrain --help\n"
                 " to see all the available options.")
Example #35
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

    """

    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)

    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear-logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else [
        label.strip() for label in args.labels.split(args.args_separator)
    ])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set,
            args.train_header,
            args,
            path,
            labels=labels,
            session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file, csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api,
            args,
            resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file, csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source,
            api,
            args,
            resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)

    if datasets:
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset,
                                       api,
                                       args,
                                       resume,
                                       session_file=session_file,
                                       path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset,
            api,
            args,
            resume,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(
                dataset, args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset,
                    distribution,
                    fields,
                    args,
                    api,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(datasets,
                                                api,
                                                args,
                                                resume,
                                                fields=fields,
                                                session_file=session_file,
                                                path=path,
                                                log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(dataset,
                                                api,
                                                args,
                                                resume,
                                                fields=fields,
                                                session_file=session_file,
                                                path=path,
                                                log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({
            'objective_field': args.objective_name_,
            'objective_field_present': True
        })
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field, labels,
                                                  multi_label_data, fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
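
A note on the transformation guard in the example above: Python's "and" binds
more tightly than "or", so the lisp/json filter flags only force a new dataset
when no source is given, while --new-fields or a sample rate other than 1
always do. A minimal standalone sketch of the same grouping (hypothetical
helper name, not BigMLer code):

def needs_new_dataset(new_fields, sample_rate, lisp_filter, json_filter,
                      has_source):
    # mirrors: new_fields or sample_rate != 1 or
    #          (lisp_filter or json_filter) and not has_source
    return bool(new_fields or sample_rate != 1 or
                ((lisp_filter or json_filter) and not has_source))

# a filter alone does not trigger the branch when a source is given
assert not needs_new_dataset(None, 1, "(> (f 0) 1)", None, has_source=True)
# sampling always does
assert needs_new_dataset(None, 0.8, None, None, has_source=True)
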
Example #36
def analyze_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer analyze

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        # Keep the debug option if set
        debug = command_args.debug
        # Restore the args of the call to resume from the command log file
        stored_command = StoredCommand(args, COMMAND_LOG, DIRS_LOG)
        command = Command(None, stored_command=stored_command)
        # Logs the issued command and the resumed command
        session_file = os.path.join(stored_command.output_dir, SESSIONS_LOG)
        stored_command.log_command(session_file=session_file)
        # Parses resumed arguments.
        command_args = command.parser.parse_args(command.args)
        command_args.debug = debug
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear_logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        if command_args.model_fields:
            model_fields = command_args.model_fields.split(',')
            command_args.model_fields_ = [
                model_field.strip() for model_field in model_fields
            ]
        else:
            command_args.model_fields_ = []
        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
    # create api instance from args
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    # --maximize flag will be deprecated. Use --optimize flag.
    if command_args.maximize is not None and command_args.optimize is None:
        command_args.optimize = command_args.maximize
    incompatible_flags = [
        command_args.cv, command_args.features, command_args.nodes
    ]
    if sum([int(bool(flag)) for flag in incompatible_flags]) > 1:
        sys.exit("The following flags cannot be used together:\n    --features"
                 "\n    --cross-validation\n    --nodes")
    # k-fold cross-validation
    if command_args.cv and command_args.dataset is not None:
        create_kfold_cv(command_args,
                        api,
                        command.common_options,
                        resume=resume)

    # features analysis
    if command_args.features:
        create_features_analysis(command_args,
                                 api,
                                 command.common_options,
                                 resume=resume)

    # node threshold analysis
    if command_args.nodes:
        create_nodes_analysis(command_args,
                              api,
                              command.common_options,
                              resume=resume)
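
The incompatible-flag guard in this example just counts how many of the
analysis options are truthy and stops when more than one is set. A standalone
sketch of that pattern (generic names, not part of BigMLer):

import sys

def check_mutually_exclusive(**flags):
    # Counts truthy flags and exits when more than one is set, mirroring the
    # sum([int(bool(flag)) for flag in incompatible_flags]) > 1 check above.
    set_flags = [name for name, value in flags.items() if value]
    if len(set_flags) > 1:
        sys.exit("The following flags cannot be used together: "
                 + ", ".join("--" + name.replace("_", "-")
                             for name in sorted(set_flags)))

check_mutually_exclusive(cross_validation=True, features=False, nodes=False)
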
Example #37
def reify_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    if command_args.resume:
        command_args, session_file, _ = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.output).strip()) == 0:
            command_args.output = os.path.join(command_args.output_dir,
                                               command_args.output)
        directory = u.check_dir(command_args.output)
        command_args.output_dir = directory
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)

        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    def logger(message):
        """Partial to log messages according to args.verbosity

        """
        u.log_message(u.dated(message), \
            log_file=session_file, console=command_args.verbosity)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    message = "Starting reification for %s\n\n" % command_args.resource_id
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    reify_resources(command_args, api, logger)
    message = "\nReification complete. See the results in %s\n\n" % \
        command_args.output
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir,
                            log_file=session_file,
                            verbosity=command_args.verbosity)
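
The nested logger in reify_dispatcher is a closure over the session file and
the verbosity flag. A self-contained approximation in plain Python, without
BigMLer's u.log_message and u.dated helpers (the file name is a placeholder):

import datetime

def make_logger(session_file, verbose=True):
    # Returns a closure that timestamps each message, appends it to the
    # session file and, when verbose, echoes it to the console.
    def logger(message):
        line = "[%s] %s" % (datetime.datetime.now().isoformat(), message)
        with open(session_file, "a") as handle:
            handle.write(line + "\n")
        if verbose:
            print(line)
    return logger

log = make_logger("session.log", verbose=False)
log("Starting reification")
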
Example #38
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    association = None
    associations = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    association_ids = args.association_ids_
    output = args.predictions
    # there's only one association resource to be generated at present
    args.max_parallel_associations = 1
    # associations cannot be published yet.
    args.public_association = False

    # It is compulsory to have a description to publish either datasets or
    # associations
    if (not args.description_ and (args.public_association or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.association_file:
        # association is retrieved from the contents of the given local JSON
        # file
        association, csv_properties, fields = u.read_local_resource(
            args.association_file,
            csv_properties=csv_properties)
        associations = [association]
        association_ids = [association['resource']]
    else:
        # association is retrieved from the remote object
        associations, association_ids, resume = pa.associations_processing(
            datasets, associations, association_ids, api, args, resume,
            fields=fields,
            session_file=session_file, path=path, log=log)
        if associations:
            association = associations[0]

    # We update the association's public state if needed
    if association:
        if isinstance(association, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            association = u.check_resource(association, api.get_association,
                                           query_string=query_string)
        associations[0] = association
        if (args.public_association or
                (args.shared_flag and
                 r.shared_changed(args.shared, association))):
            association_args = {}
            if args.shared_flag and \
                    r.shared_changed(args.shared, association):
                association_args.update(shared=args.shared)
            if args.public_association:
                association_args.update(r.set_publish_association_args(args))
            if association_args:
                association = r.update_association( \
                    association, association_args, args,
                    api=api, path=path,
                    session_file=session_file)
                associations[0] = association

    # We get the fields of the association if we haven't got
    # them yet and need them
    if association and args.test_set:
        fields = pa.get_association_fields(association, csv_properties, args)

    # If predicting
    if associations and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote association sets: association sets are computed as
        # batch association sets
        # in bigml.com except when the --no-batch flag is set. They are not
        # currently supported.
        if args.remote and not args.no_batch:
            sys.exit("Batch association sets are currently not supported.")
            """
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_association_args = r.set_batch_association_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_association( \
                association, test_dataset, batch_association_args,
                args, api, resume, prediction_file=output,
                session_file=session_file, path=path, log=log)
            """
        else:
            sys.exit("Local prediction of association sets is currently"
                     " not supported.")
            """
            association_set(associations, fields, args,
                            session_file=session_file)
            """
    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #39
def compute_output(api, args):
    """ Creates one or more anomaly detectors using the `training_set`
        or uses the ids of previously created BigML models to make
        predictions for the `test_set`.

    """

    anomaly = None
    anomalies = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    anomaly_ids = args.anomaly_ids_
    output = args.predictions
    # there's only one anomaly detector to be generated at present
    args.max_parallel_anomalies = 1
    # anomalies cannot be published yet.
    args.public_anomaly = False

    # It is compulsory to have a description to publish either datasets or
    # anomalies
    if (not args.description_ and (args.public_anomaly or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])
    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (dataset, datasets, test_dataset, resume,
     csv_properties, fields) = dataset_properties
    if args.anomaly_file:
        # anomaly is retrieved from the contents of the given local JSON file
        anomaly, csv_properties, fields = u.read_local_resource(
            args.anomaly_file,
            csv_properties=csv_properties)
        anomalies = [anomaly]
        anomaly_ids = [anomaly['resource']]
    else:
        # anomaly is retrieved from the remote object
        anomalies, anomaly_ids, resume = pa.anomalies_processing(
            datasets, anomalies, anomaly_ids, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
    if anomalies:
        anomaly = anomalies[0]

    # We update the anomaly's public state if needed
    if anomaly:
        if not a.has_test(args) and not args.anomalies_dataset:
            query_string = MINIMUM_MODEL
        elif not a.has_test(args):
            query_string = ";".join([EXCLUDE_TREES, r.ALL_FIELDS_QS])
        else:
            query_string = r.ALL_FIELDS_QS
        try:
            anomaly_id = anomaly.get('resource', anomaly)
        except AttributeError:
            anomaly_id = anomaly
        anomaly = u.check_resource(anomaly_id,
                                   query_string=query_string,
                                   api=api)
        anomalies[0] = anomaly
        if (args.public_anomaly or
                (args.shared_flag and r.shared_changed(args.shared, anomaly))):
            anomaly_args = {}
            if args.shared_flag and r.shared_changed(args.shared, anomaly):
                anomaly_args.update(shared=args.shared)
            if args.public_anomaly:
                anomaly_args.update(r.set_publish_anomaly_args(args))
            if anomaly_args:
                anomaly = r.update_anomaly(anomaly, anomaly_args, args,
                                           api=api, path=path,
                                           session_file=session_file)
                anomalies[0] = anomaly

    # We get the fields of the anomaly detector if we haven't got
    # them yet and need them
    if anomaly and args.test_set:
        fields = pa.get_anomaly_fields(anomaly, csv_properties, args)

    # If creating a top anomalies excluded/included dataset
    if args.anomalies_dataset and anomaly:
        origin_dataset = anomaly['object'].get('dataset')
        if origin_dataset is None:
            sys.exit("The dataset used to generate the anomaly detector "
                     "cannot be found. Failed to generate the anomalies "
                     " dataset.")
        local_anomaly = Anomaly(anomaly)
        include = args.anomalies_dataset == ANOMALIES_IN
        args._anomaly_filter = local_anomaly.anomalies_filter(include=include)
        new_dataset, resume = pd.create_new_dataset(
            origin_dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
    # If predicting
    if anomaly and args.score:
        args.test_dataset = anomaly['object']['dataset']
    if anomalies and (a.has_test(args) or (test_dataset and args.remote)):
        # test dataset can be defined by --test-split or --test-dataset or
        # --test-datasets
        if test_dataset is None:
            test_dataset = get_test_dataset(args)
        # Remote anomaly scores: scores are computed as batch anomaly scores
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_anomaly_score_args = r.set_batch_anomaly_score_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_anomaly_score(anomaly, test_dataset,
                                 batch_anomaly_score_args, args,
                                 api, resume, prediction_file=output,
                                 session_file=session_file, path=path, log=log)

        else:
            anomaly_score(anomalies, fields, args, session_file=session_file)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
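
The anomaly lookup above has to accept either a full API resource (a dict with
a 'resource' key) or a bare id string, which is what the try/except
AttributeError around anomaly.get() handles. A tiny sketch of the same idiom
(generic name, placeholder ids):

def resource_id(resource):
    # Works for both {'resource': 'anomaly/...'} dicts and plain id strings.
    try:
        return resource.get('resource', resource)
    except AttributeError:
        return resource

assert resource_id({'resource': 'anomaly/123'}) == 'anomaly/123'
assert resource_id('anomaly/123') == 'anomaly/123'
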
Example #40
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

    """


    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)

    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties, multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)

    if datasets:
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset into a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file, path=path, log=log,
                    other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     "  --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields, multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label',
                                   other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
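
The --labels preprocessing near the top of this example splits the raw option
value on the configured separator, strips whitespace and sorts the result so
label order is deterministic. A standalone sketch of that normalization
(hypothetical function name):

def parse_labels(raw_labels, separator=","):
    # None stays None; otherwise split, strip and sort, as in the example.
    if raw_labels is None:
        return None
    return sorted(label.strip() for label in raw_labels.split(separator))

assert parse_labels(" b, a ,c") == ["a", "b", "c"]
assert parse_labels(None) is None
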
Example #41
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    pca = None

    # variables from command-line options
    resume = args.resume_
    pca_ids = args.pca_ids_
    output = args.projections
    # there's only one pca to be generated at present
    args.max_parallel_pcas = 1
    # pca cannot be published yet.
    args.public_pca = False

    # It is compulsory to have a description to publish either datasets or
    # pcas
    if (not args.description_ and (args.public_pca or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.pca_file:
        # the pca is retrieved from the contents of the given local
        # JSON file
        pca, csv_properties, fields = u.read_local_resource(
            args.pca_file,
            csv_properties=csv_properties)
        pca_ids = [pca]
    else:
        # pca is retrieved from the remote object or created
        pca, resume = \
            pc.pca_processing( \
            datasets, pca, pca_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)

    # We update the pca public state if needed
    if pca:
        if isinstance(pca, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            pca = u.check_resource(pca,
                                   api.get_pca,
                                   query_string=query_string)
        if (args.public_pca or
                (args.shared_flag and r.shared_changed(args.shared,
                                                       pca))):
            pca_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     pca):
                pca_args.update(shared=args.shared)
            if args.public_pca:
                pca_args.update( \
                    r.set_publish_pca_args(args))
            if pca_args:
                pca = r.update_pca( \
                    pca, pca_args, args,
                    api=api, path=path, \
                    session_file=session_file)

    # We get the fields of the pca if we haven't got
    # them yet and need them
    if pca and (args.test_set or args.export_fields):
        fields = pc.get_pca_fields( \
            pca, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))


    # If predicting
    if pca and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote projections: projections are computed as batch projections
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_projection_args = r.set_batch_projection_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_projection(pca, test_dataset, \
                batch_projection_args, args, \
                api, resume, projection_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            projection(pca, fields, args,
                       session_file=session_file)


    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #42
def compute_output(api, args):
    """ Creates a fusion using the `models` list or uses the ids
    of a previously created BigML fusion to make predictions for the `test_set`.

    """

    fusion = None

    # variables from command-line options
    resume = args.resume_
    fusion_ids = args.fusion_ids_
    output = args.predictions
    # there's only one fusion to be generated at present
    args.max_parallel_fusions = 1
    # fusion cannot be published yet.
    args.public_fusion = False

    # It is compulsory to have a description to publish either datasets or
    # fusions
    if (not args.description_ and args.public_fusion):
        sys.exit("You should provide a description to publish.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    if args.fusion_file:
        # the fusion is retrieved from the contents of the given local
        # JSON file
        fusion, csv_properties, fields = u.read_local_resource(
            args.fusion_file,
            csv_properties=csv_properties)
        fusion_ids = [fusion]
    else:
        # fusion is retrieved from the remote object or created
        fusion, resume = \
            pf.fusion_processing( \
            fusion, fusion_ids, \
            api, args, resume, \
            session_file=session_file, path=path, log=log)

    # We update the fusion public state if needed
    if fusion:
        if isinstance(fusion, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            fusion = u.check_resource(fusion,
                                      api.get_fusion,
                                      query_string=query_string)
        if (args.public_fusion or
                (args.shared_flag and r.shared_changed(args.shared,
                                                       fusion))):
            fusion_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     fusion):
                fusion_args.update(shared=args.shared)
            if args.public_fusion:
                fusion_args.update( \
                    r.set_publish_fusion_args(args))
            if fusion_args:
                fusion = r.update_fusion( \
                    fusion, fusion_args, args,
                    api=api, path=path, \
                    session_file=session_file)

    # We get the fields of the fusion if we haven't got
    # them yet and need them
    if fusion and (args.test_set or args.evaluate):
        fields = pf.get_fusion_fields( \
            fusion, csv_properties, args)


    # If predicting
    if fusion and (a.has_test(args) or \
            args.remote):
        test_dataset = get_test_dataset(args)

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            if not args.evaluate:
                batch_prediction_args = r.set_batch_prediction_args(
                    args, fields=fields,
                    dataset_fields=test_fields)

                remote_prediction(fusion, test_dataset, \
                    batch_prediction_args, args, \
                    api, resume, prediction_file=output, \
                    session_file=session_file, path=path, log=log)

        else:
            prediction([fusion], fields, args,
                       session_file=session_file)


    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        args.max_parallel_evaluations = 1 # only one evaluation at present
        args.cross_validation_rate = 0 # no cross-validation
        args.number_of_evaluations = 1 # only one evaluation
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
            dataset = test_dataset
            dataset = u.check_resource(dataset, api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate([fusion], [dataset], api,
                              args, resume,
                              fields=fields, dataset_fields=dataset_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)


    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #43
def analyze_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer analyze

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir,
                                    SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear_logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        if command_args.model_fields:
            model_fields = command_args.model_fields.split(',')
            command_args.model_fields_ = [model_field.strip()
                                          for model_field in model_fields]
        else:
            command_args.model_fields_ = []
        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
    # create api instance from args
    api = a.get_api_instance(command_args,
                             u.check_dir(session_file))
    # --maximize flag will be deprecated. Use --optimize flag.
    if command_args.maximize is not None and command_args.optimize is None:
        command_args.optimize = command_args.maximize
    incompatible_flags = [command_args.cv, command_args.features,
                          command_args.nodes, command_args.random_fields]
    if sum([int(bool(flag)) for flag in incompatible_flags]) > 1:
        sys.exit("The following flags cannot be used together:\n    --features"
                 "\n    --cross-validation\n    --nodes\n    --random-fields")
    if (command_args.dataset is None and command_args.datasets is None and
            command_args.dataset_file is None):
        sys.exit("The analyze command needs an existing dataset ID. Please, "
                 "use the --dataset flag.")
    if not any(incompatible_flags):
        sys.exit("You need to specify the type of analysis: features, node "
                 "threshold, cross validation or random fields.")
    # k-fold cross-validation
    if command_args.cv and command_args.dataset is not None:
        create_kfold_cv(command_args, api, command.common_options,
                        resume=resume)

    # features analysis
    elif command_args.features:
        create_features_analysis(command_args, api, command.common_options,
                                 resume=resume)

    # node threshold analysis
    elif command_args.nodes:
        create_nodes_analysis(command_args, api, command.common_options,
                              resume=resume)

    # random fields analysis
    elif command_args.random_fields:
        create_candidates_analysis(command_args, api, command.common_options,
                                   resume=resume)
    else:
        sys.exit("You must choose one of the available analysis: --features,"
                 " --nodes, --random-fields or --cross-validation. Add"
                 " your prefered option to"
                 " the command line or type\n    bigmler analyze --help\n"
                 " to see all the available options.")
Example #44
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    pca = None

    # variables from command-line options
    resume = args.resume_
    pca_ids = args.pca_ids_
    output = args.projections
    # there's only one pca to be generated at present
    args.max_parallel_pcas = 1
    # pca cannot be published yet.
    args.public_pca = False

    # It is compulsory to have a description to publish either datasets or
    # pcas
    if (not args.description_ and (args.public_pca or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, test_dataset, resume, csv_properties,
     fields) = dataset_properties
    if args.pca_file:
        # the pca is retrieved from the contents of the given local
        # JSON file
        pca, csv_properties, fields = u.read_local_resource(
            args.pca_file, csv_properties=csv_properties)
        pca_ids = [pca]
    else:
        # pca is retrieved from the remote object or created
        pca, resume = \
            pc.pca_processing( \
            datasets, pca, pca_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)

    # We update the pca public state if needed
    if pca:
        if isinstance(pca, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            pca = u.check_resource(pca, api.get_pca, query_string=query_string)
        if (args.public_pca
                or (args.shared_flag and r.shared_changed(args.shared, pca))):
            pca_args = {}
            if args.shared_flag and r.shared_changed(args.shared, pca):
                pca_args.update(shared=args.shared)
            if args.public_pca:
                pca_args.update( \
                    r.set_publish_pca_args(args))
            if pca_args:
                pca = r.update_pca( \
                    pca, pca_args, args,
                    api=api, path=path, \
                    session_file=session_file)

    # We get the fields of the pca if we haven't got
    # them yet and need them
    if pca and (args.test_set or args.export_fields):
        fields = pc.get_pca_fields( \
            pca, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If predicting
    if pca and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote projections: projections are computed as batch projections
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api,
                    args,
                    resume,
                    name=test_name,
                    session_file=session_file,
                    path=path,
                    log=log)
                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source,
                    "test",
                    dataset_args,
                    api,
                    args,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            batch_projection_args = r.set_batch_projection_args(
                args, fields=fields, dataset_fields=test_fields)

            remote_projection(pca, test_dataset, \
                batch_projection_args, args, \
                api, resume, projection_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            projection(pca, fields, args, session_file=session_file)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #45
def compute_output(api, args):
    """ Creates a sample based on a `train_set`, source or dataset.

    """

    samples = None
    # variables from command-line options
    resume = args.resume_
    sample_ids = args.sample_ids_
    output = args.predictions
    # there's only one sample to be generated at present
    args.max_parallel_clusters = 1
    # sample cannot be published yet.
    args.public_sample = False

    # It is compulsory to have a description to publish either datasets or
    # samples
    if (not args.description_ and (args.public_sample or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-sample step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-sample step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, _, resume,
     csv_properties, fields) = dataset_properties
    if args.sample_file:
        # sample is retrieved from the contents of the given local JSON file
        sample, csv_properties, fields = u.read_local_resource(
            args.sample_file,
            csv_properties=csv_properties)
        samples = [sample]
        sample_ids = [sample['resource']]
    else:
        # sample is retrieved from the remote object
        samples, sample_ids, resume = psa.samples_processing(
            datasets, samples, sample_ids, api, args, resume,
            session_file=session_file, path=path, log=log)
        if samples:
            sample = samples[0]

    # We update the sample's public state if needed
    if sample:
        if isinstance(sample, basestring):
            # build the query string from the sample options
            sample = u.check_resource(sample, api.get_sample)
        samples[0] = sample
        if (args.public_sample or
                (args.shared_flag and r.shared_changed(args.shared, sample))):
            sample_args = {}
            if args.shared_flag and r.shared_changed(args.shared, sample):
                sample_args.update(shared=args.shared)
            if args.public_sample:
                sample_args.update(r.set_publish_sample_args(args))
            if sample_args:
                sample = r.update_sample(sample, sample_args, args,
                                         api=api, path=path,
                                         session_file=session_file)
                samples[0] = sample

    # We get the fields of the sample if we haven't got
    # them yet and need them
    if sample and psa.needs_sample_fields(args):
        fields = psa.get_sample_fields(sample, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    sample_file(samples[0], fields, args, api, path=path,
                session_file=session_file)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
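A note on the example above: before updating the sample's shared or public state it normalizes a value that may be either a resource id string or an already retrieved dict. A minimal, self-contained sketch of that idiom (Python 3 `str` here instead of the `basestring` used by the Python 2 code above; `fetch` is a stand-in for getters such as `api.get_sample`):

def ensure_resource(resource, fetch):
    """Return the full resource dict.

    `resource` may be an id string (e.g. "sample/5f2a...") or a dict that was
    already retrieved; `fetch` is the callable used to resolve id strings.
    """
    if isinstance(resource, str):
        return fetch(resource)
    return resource


# Illustrative usage with a stubbed fetcher instead of a real api client:
fake_store = {"sample/123": {"resource": "sample/123",
                             "object": {"name": "demo sample"}}}
sample = ensure_resource("sample/123", fake_store.get)
assert sample["object"]["name"] == "demo sample"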
Example #46
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    time_series = None
    time_series_set = None

    # variables from command-line options
    resume = args.resume_
    time_series_ids = args.time_series_ids_
    output = args.predictions
    # there's only one time_series to be generated at present
    args.max_parallel_time_series = 1
    args.max_parallel_evaluations = 1
    # time_series cannot be published yet.
    args.public_time_series = False
    # no cross-validations
    args.dataset_off = False
    args.cross_validation_rate = 0
    args.number_of_evaluations = 1

    # It is compulsory to have a description to publish either datasets or
    # time_series
    if (not args.description_
            and (args.public_time_series or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, test_dataset, resume, csv_properties,
     fields) = dataset_properties
    if datasets:
        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)
        # if the time series is going to be evaluated, and we don't have
        # test data, we need to divide the rows using ranges, so we'll need
        # max rows
        args.max_rows = datasets[0]["object"]["rows"]
    if args.time_series_file:
        # time-series is retrieved from the contents of the given local
        # JSON file
        time_series, csv_properties, fields = u.read_local_resource(
            args.time_series_file, csv_properties=csv_properties)
        time_series_set = [time_series]
        time_series_ids = [time_series['resource']]
    else:
        # time-series is retrieved from the remote object
        time_series_set, time_series_ids, resume = \
            pts.time_series_processing( \
            datasets, time_series_set, time_series_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)
        if time_series_set:
            time_series = time_series_set[0]

    # We update the time-series' public state if needed
    if time_series:
        if isinstance(time_series, basestring):
            query_string = r.ALL_FIELDS_QS
            time_series = u.check_resource(time_series,
                                           api.get_time_series,
                                           query_string=query_string)
        time_series_set[0] = time_series
        if (args.public_time_series or
            (args.shared_flag and r.shared_changed(args.shared, time_series))):
            time_series_args = {}
            if args.shared_flag and r.shared_changed(args.shared, time_series):
                time_series_args.update(shared=args.shared)
            if args.public_time_series:
                time_series_args.update( \
                    r.set_publish_time_series_args(args))
            if time_series_args:
                time_series = r.update_time_series( \
                    time_series, time_series_args, args,
                    api=api, path=path, \
                    session_file=session_file)
                time_series_set[0] = time_series
    """
    # We get the fields of the time-series if we haven't got
    # them yet and need them
    if time_series and (args.test_set or args.export_fields):
        fields = pts.get_time_series_fields( \
            time_series, csv_properties, args)
    """

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If forecasting
    if time_series_set and a.has_ts_test(args):
        if args.remote:
            forecast_args = r.set_forecast_args(args, fields=fields)

            remote_forecast(time_series, forecast_args, args, \
                api, resume, prediction_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            forecast(time_series, args, session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
        if args.test_dataset_ids and args.dataset_off:
            # Evaluate the models with the corresponding test datasets.
            test_dataset_id = bigml.api.get_dataset_id( \
                args.test_dataset_ids[0])
            test_dataset = api.check_resource(test_dataset_id)
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            resume = evaluate(time_series_set,
                              args.test_dataset_ids,
                              api,
                              args,
                              resume,
                              fields=fields,
                              dataset_fields=test_fields,
                              session_file=session_file,
                              path=path,
                              log=log,
                              objective_field=args.objective_field)
        else:
            dataset = datasets[0]
            if args.test_split > 0 or args.has_test_datasets_:
                dataset = test_dataset
            else:
                args.range_ = [
                    int(args.max_rows * r.EVALUATE_SAMPLE_RATE), args.max_rows
                ]
            dataset = u.check_resource(dataset,
                                       api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate(time_series_set, [dataset],
                              api,
                              args,
                              resume,
                              fields=fields,
                              dataset_fields=dataset_fields,
                              session_file=session_file,
                              path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
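When no test data is supplied, the evaluation branch above holds out the tail of the training dataset by building a row range from `max_rows` and `r.EVALUATE_SAMPLE_RATE`. A small sketch of that arithmetic; the 0.8 rate is only an assumed value for illustration:

# The constant below is an assumption made to keep the example runnable;
# BigMLer reads the real value from its resources module (r.EVALUATE_SAMPLE_RATE).
EVALUATE_SAMPLE_RATE = 0.8

def evaluation_range(max_rows, rate=EVALUATE_SAMPLE_RATE):
    """Row range [start, end] held out to evaluate the time series."""
    return [int(max_rows * rate), max_rows]

print(evaluation_range(1000))  # [800, 1000]: the last 20% of the rows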
Example #47
0
def reify_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    if command_args.resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               DEFAULT_OUTPUT)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        if command_args.output is None:
            command_args.output = os.path.join(command_args.output_dir,
                                               DEFAULT_OUTPUT)
        if len(os.path.dirname(command_args.output).strip()) == 0:
            command_args.output = os.path.join(command_args.output_dir,
                                               command_args.output)
        directory = u.check_dir(command_args.output)
        command_args.output_dir = directory
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)

        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    def logger(message):
        """Partial to log messages according to args.verbosity

        """
        u.log_message(u.dated(message), \
            log_file=session_file, console=command_args.verbosity)

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    message = "Starting reification for %s\n\n" % command_args.resource_id
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    reify_resources(command_args, api, logger)
    message = "\nReification complete. See the results in %s\n\n" % \
        command_args.output
    u.log_message(message, \
        log_file=session_file, console=command_args.verbosity)
    u.log_message("_" * 80 + "\n", log_file=session_file)

    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
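The non-resume branch above derives the output locations from a few defaulting rules: the output directory falls back to a timestamped name, the output file falls back to a default inside it, and a bare file name is joined onto the directory. A minimal sketch of those rules, with assumed values standing in for `NOW` and `DEFAULT_OUTPUT`:

import os

# NOW and DEFAULT_OUTPUT are assumptions standing in for the timestamped
# directory name and the default output file BigMLer actually uses.
NOW = "20240101_000000"
DEFAULT_OUTPUT = "reify.py"

def resolve_output(output=None, output_dir=None):
    """Return (output_path, output_dir) following the defaulting rules above."""
    output_dir = output_dir or NOW
    if output is None:
        output = os.path.join(output_dir, DEFAULT_OUTPUT)
    elif not os.path.dirname(output).strip():
        # a bare file name is placed inside the output directory
        output = os.path.join(output_dir, output)
    return output, os.path.dirname(output)

print(resolve_output())                        # defaults into the timestamped dir
print(resolve_output(output="my_script.py"))   # bare name joined onto output_dir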
Example #48
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    logistic_regression = None
    logistic_regressions = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    logistic_regression_ids = args.logistic_regression_ids_
    output = args.predictions
    # there's only one logistic regression to be generated at present
    args.max_parallel_logistic_regressions = 1
    # logistic regressions cannot be published yet.
    args.public_logistic_regression = False

    # It is compulsory to have a description to publish either datasets or
    # logistic regressions
    if (not args.description_ and (args.public_logistic_regression or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if datasets:
        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)
    if args.logistic_file:
        # logistic regression is retrieved from the contents of the given local
        # JSON file
        logistic_regression, csv_properties, fields = u.read_local_resource(
            args.logistic_file,
            csv_properties=csv_properties)
        logistic_regressions = [logistic_regression]
        logistic_regression_ids = [logistic_regression['resource']]
    else:
        # logistic regression is retrieved from the remote object
        logistic_regressions, logistic_regression_ids, resume = \
            plr.logistic_regressions_processing( \
            datasets, logistic_regressions, logistic_regression_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)
        if logistic_regressions:
            logistic_regression = logistic_regressions[0]

    # We update the logistic regression's public state if needed
    if logistic_regression:
        if isinstance(logistic_regression, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            logistic_regression = u.check_resource(logistic_regression,
                                                   api.get_logistic_regression,
                                                   query_string=query_string)
        logistic_regressions[0] = logistic_regression
        if (args.public_logistic_regression or
                (args.shared_flag and r.shared_changed(args.shared,
                                                       logistic_regression))):
            logistic_regression_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     logistic_regression):
                logistic_regression_args.update(shared=args.shared)
            if args.public_logistic_regression:
                logistic_regression_args.update( \
                    r.set_publish_logistic_regression_args(args))
            if logistic_regression_args:
                logistic_regression = r.update_logistic_regression( \
                    logistic_regression, logistic_regression_args, args,
                    api=api, path=path, \
                    session_file=session_file)
                logistic_regressions[0] = logistic_regression

    # We get the fields of the logistic_regression if we haven't got
    # them yet and need them
    if logistic_regression and (args.test_set or args.export_fields):
        fields = plr.get_logistic_fields( \
            logistic_regression, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If predicting
    if logistic_regressions and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_prediction_args = r.set_batch_prediction_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_lr_prediction(logistic_regression, test_dataset, \
                batch_prediction_args, args, \
                api, resume, prediction_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            lr_prediction(logistic_regressions, fields, args,
                          session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
        if args.test_dataset_ids and args.dataset_off:
            # Evaluate the models with the corresponding test datasets.
            test_dataset_id = bigml.api.get_dataset_id( \
                args.test_dataset_ids[0])
            test_dataset = api.check_resource(test_dataset_id)
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            resume = evaluate(logistic_regressions, args.test_dataset_ids, api,
                              args, resume,
                              fields=fields, dataset_fields=test_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
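The query string chosen above controls how much of the logistic regression is downloaded: a minimal payload when no predictions are requested, the full field structure when `--export-fields` is used, and the default otherwise. A compact sketch of that decision; the constant values are assumptions used only to make it runnable:

# Assumed values for illustration; the real query strings come from BigMLer's
# resources module (MINIMUM_MODEL, r.ALL_FIELDS_QS).
MINIMUM_MODEL = "full=false"
ALL_FIELDS_QS = "limit=-1"

def model_query_string(has_test, export_fields):
    if not has_test:
        return MINIMUM_MODEL   # no predictions requested: fetch a light payload
    if export_fields:
        return ALL_FIELDS_QS   # the CSV export needs the full field structure
    return ""                  # default retrieval

print(model_query_string(has_test=False, export_fields=False))
print(model_query_string(has_test=True, export_fields=True))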
Example #49
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    cluster = None
    clusters = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    cluster_ids = args.cluster_ids_
    output = args.predictions
    # there's only one cluster to be generated at present
    args.max_parallel_clusters = 1
    # clusters cannot be published yet.
    args.public_cluster = False

    # It is compulsory to have a description to publish either datasets or
    # clusters
    if (not args.description_ and (args.public_cluster or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.cluster_file:
        # cluster is retrieved from the contents of the given local JSON file
        cluster, csv_properties, fields = u.read_local_resource(
            args.cluster_file,
            csv_properties=csv_properties)
        clusters = [cluster]
        cluster_ids = [cluster['resource']]
    else:
        # cluster is retrieved from the remote object
        clusters, cluster_ids, resume = pc.clusters_processing(
            datasets, clusters, cluster_ids, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        if clusters:
            cluster = clusters[0]

    # We update the cluster's public state if needed
    if cluster:
        if isinstance(cluster, basestring):
            if args.cluster_datasets is None and not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            cluster = u.check_resource(cluster, api.get_cluster,
                                       query_string=query_string)
        clusters[0] = cluster
        if (args.public_cluster or
                (args.shared_flag and r.shared_changed(args.shared, cluster))):
            cluster_args = {}
            if args.shared_flag and r.shared_changed(args.shared, cluster):
                cluster_args.update(shared=args.shared)
            if args.public_cluster:
                cluster_args.update(r.set_publish_cluster_args(args))
            if cluster_args:
                cluster = r.update_cluster(cluster, cluster_args, args,
                                           api=api, path=path,
                                           session_file=session_file)
                clusters[0] = cluster

    # We get the fields of the cluster if we haven't got
    # them yet and need them
    if cluster and args.test_set:
        fields = pc.get_cluster_fields(cluster, csv_properties, args)

    # If predicting
    if clusters and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote centroids: centroids are computed as batch centroids
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_centroid_args = r.set_batch_centroid_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                            api, resume, prediction_file=output,
                            session_file=session_file, path=path, log=log)

        else:
            centroid(clusters, fields, args, session_file=session_file)

    if cluster and args.cluster_datasets is not None:
        centroids_info = cluster['object']['clusters']['clusters']
        centroids = {centroid['name']: centroid['id']
                     for centroid in centroids_info}
        datasets = cluster['object']['cluster_datasets']
        if args.cluster_datasets == '':
            centroid_ids = centroids.values()
        else:
            centroid_ids = [centroids[cluster_name] for cluster_name in
                            args.cluster_datasets_
                            if datasets[centroids[cluster_name]] == '']

        for centroid_id in centroid_ids:
            dataset_args = {'centroid': centroid_id}
            r.create_dataset(cluster, dataset_args, args, api=api, path=path,
                             session_file=session_file, log=log,
                             dataset_type='cluster')

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
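The `--cluster-datasets` handling above maps centroid names to ids and only creates datasets for centroids that do not have one yet. The same selection logic, isolated over dummy data shaped like the fields read from `cluster['object']`:

# Dummy structures mirroring cluster['object']['clusters']['clusters'] and
# cluster['object']['cluster_datasets'] as read in the example above.
centroids_info = [{"name": "Cluster 0", "id": "000000"},
                  {"name": "Cluster 1", "id": "000001"}]
cluster_datasets = {"000000": "dataset/abc123", "000001": ""}  # "" = not built yet

centroids = {centroid["name"]: centroid["id"] for centroid in centroids_info}
requested = ["Cluster 0", "Cluster 1"]  # names given in --cluster-datasets
centroid_ids = [centroids[name] for name in requested
                if cluster_datasets[centroids[name]] == ""]

print(centroid_ids)  # ['000001']: only the centroid still missing its dataset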
Example #50
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    association = None
    associations = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    association_ids = args.association_ids_
    output = args.predictions
    # there's only one association resource to be generated at present
    args.max_parallel_associations = 1
    # associations cannot be published yet.
    args.public_association = False

    # It is compulsory to have a description to publish either datasets or
    # associations
    if (not args.description_ and (args.public_association or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.association_file:
        # association is retrieved from the contents of the given local JSON
        # file
        association, csv_properties, fields = u.read_local_resource(
            args.association_file,
            csv_properties=csv_properties)
        associations = [association]
        association_ids = [association['resource']]
    else:
        # association is retrieved from the remote object
        associations, association_ids, resume = pa.associations_processing(
            datasets, associations, association_ids, api, args, resume,
            fields=fields,
            session_file=session_file, path=path, log=log)
        if associations:
            association = associations[0]

    # We update the association's public state if needed
    if association:
        if isinstance(association, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            association = u.check_resource(association, api.get_association,
                                           query_string=query_string)
        associations[0] = association
        if (args.public_association or
                (args.shared_flag and
                 r.shared_changed(args.shared, association))):
            association_args = {}
            if args.shared_flag and \
                    r.shared_changed(args.shared, association):
                association_args.update(shared=args.shared)
            if args.public_association:
                association_args.update(ras.set_publish_association_args(args))
            if association_args:
                association = ras.update_association( \
                    association, association_args, args,
                    api=api, path=path,
                    session_file=session_file)
                associations[0] = association

    # We get the fields of the association if we haven't got
    # them yet and need them
    if association and args.test_set:
        fields = pa.get_association_fields(association, csv_properties, args)

    # If predicting
    if associations and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote association sets would be computed as batch association sets
        # in bigml.com except when the --no-batch flag is set, but they are
        # not supported yet.
        if args.remote and not args.no_batch:
            sys.exit("Batch association sets are currently not supported.")
        else:
            sys.exit("Local prediction of association sets is currently"
                     " not supported.")
    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #51
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    time_series = None
    time_series_set = None

    # variables from command-line options
    resume = args.resume_
    time_series_ids = args.time_series_ids_
    output = args.predictions
    # there's only one time_series to be generated at present
    args.max_parallel_time_series = 1
    args.max_parallel_evaluations = 1
    # time_series cannot be published yet.
    args.public_time_series = False
    # no cross-validations
    args.dataset_off = False
    args.cross_validation_rate = 0
    args.number_of_evaluations = 1

    # It is compulsory to have a description to publish either datasets or
    # time_series
    if (not args.description_ and (args.public_time_series or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if datasets:
        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)
        # if the time series is going to be evaluated, and we don't have
        # test data, we need to divide the rows using ranges, so we'll need
        # max rows
        args.max_rows = datasets[0]["object"]["rows"]
    if args.time_series_file:
        # time-series is retrieved from the contents of the given local
        # JSON file
        time_series, csv_properties, fields = u.read_local_resource(
            args.time_series_file,
            csv_properties=csv_properties)
        time_series_set = [time_series]
        time_series_ids = [time_series['resource']]
    else:
        # time-series is retrieved from the remote object
        time_series_set, time_series_ids, resume = \
            pts.time_series_processing( \
            datasets, time_series_set, time_series_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)
        if time_series_set:
            time_series = time_series_set[0]

    # We update the time-series' public state if needed
    if time_series:
        if isinstance(time_series, basestring):
            query_string = r.ALL_FIELDS_QS
            time_series = u.check_resource(time_series,
                                           api.get_time_series,
                                           query_string=query_string)
        time_series_set[0] = time_series
        if (args.public_time_series or
                (args.shared_flag and r.shared_changed(args.shared,
                                                       time_series))):
            time_series_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     time_series):
                time_series_args.update(shared=args.shared)
            if args.public_time_series:
                time_series_args.update( \
                    r.set_publish_time_series_args(args))
            if time_series_args:
                time_series = r.update_time_series( \
                    time_series, time_series_args, args,
                    api=api, path=path, \
                    session_file=session_file)
                time_series_set[0] = time_series

    """
    # We get the fields of the time-series if we haven't got
    # them yet and need them
    if time_series and (args.test_set or args.export_fields):
        fields = pts.get_time_series_fields( \
            time_series, csv_properties, args)
    """

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If forecasting
    if time_series_set and a.has_ts_test(args):
        if args.remote:
            forecast_args = r.set_forecast_args(
                args, fields=fields)

            remote_forecast(time_series, forecast_args, args, \
                api, resume, prediction_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            forecast(time_series, args,
                     session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
        if args.test_dataset_ids and args.dataset_off:
            # Evaluate the models with the corresponding test datasets.
            test_dataset_id = bigml.api.get_dataset_id( \
                args.test_dataset_ids[0])
            test_dataset = api.check_resource(test_dataset_id)
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            resume = evaluate(time_series_set, args.test_dataset_ids, api,
                              args, resume,
                              fields=fields, dataset_fields=test_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)
        else:
            dataset = datasets[0]
            if args.test_split > 0 or args.has_test_datasets_:
                dataset = test_dataset
            else:
                args.range_ = [int(args.max_rows * r.EVALUATE_SAMPLE_RATE),
                               args.max_rows]
            dataset = u.check_resource(dataset, api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate(time_series_set, [dataset], api,
                              args, resume,
                              fields=fields, dataset_fields=dataset_fields,
                              session_file=session_file, path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
Example #52
0
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    topic_model = None
    topic_models = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    topic_model_ids = args.topic_model_ids_
    output = args.predictions
    # there's only one topic model resource to be generated at present
    args.max_parallel_topic_models = 1
    # topic models cannot be published yet.
    args.public_topic_model = False

    # It is compulsory to have a description to publish either datasets or
    # topic models
    if (not args.description_ and (args.public_topic_model or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset,
     resume, csv_properties, fields) = dataset_properties
    if args.topic_model_file:
        # topic model is retrieved from the contents of the given local JSON
        # file
        topic_model, csv_properties, fields = u.read_local_resource(
            args.topic_model_file,
            csv_properties=csv_properties)
        topic_models = [topic_model]
        topic_model_ids = [topic_model['resource']]
    else:
        # topic model is retrieved from the remote object
        topic_models, topic_model_ids, resume = pt.topic_model_processing(
            datasets, topic_models, topic_model_ids, api, args, resume,
            fields=fields,
            session_file=session_file, path=path, log=log)
        if topic_models:
            topic_model = topic_models[0]

    # We update the topic model's public state if needed
    if topic_model:
        if isinstance(topic_model, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            topic_model = u.check_resource(topic_model, api.get_topic_model,
                                           query_string=query_string)
        topic_models[0] = topic_model
        if (args.public_topic_model or
                (args.shared_flag and
                 r.shared_changed(args.shared, topic_model))):
            topic_model_args = {}
            if args.shared_flag and \
                    r.shared_changed(args.shared, topic_model):
                topic_model_args.update(shared=args.shared)
            if args.public_topic_model:
                topic_model_args.update(r.set_publish_topic_model_args(args))
            if topic_model_args:
                topic_model = r.update_topic_model( \
                    topic_model, topic_model_args, args,
                    api=api, path=path,
                    session_file=session_file)
                topic_models[0] = topic_model

    # We get the fields of the topic model if we haven't got
    # them yet and need them
    if topic_model and args.test_set:
        csv_properties.update({'objective_field_present': False,
                               'objective_field': None})
        fields = pt.get_topic_model_fields(topic_model, csv_properties, args)

    # If predicting
    if topic_models and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote topic distributions: topic distributions are computed as
        # batch topic distributions
        # in bigml.com except when --no-batch flag is set.
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_topic_distribution_args = \
                r.set_batch_topic_distribution_args( \
                args, fields=fields, \
                dataset_fields=test_fields)

            remote_topic_distribution( \
                topic_model, test_dataset, batch_topic_distribution_args,
                args, api, resume, prediction_file=output,
                session_file=session_file, path=path, log=log)
        else:
            topic_distribution(topic_models, fields, args,
                               session_file=session_file)

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
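The remote branch above resolves the test data in a fixed order: reuse an existing test source or upload the local test file, then reuse the given test dataset or build one from that source. A simplified sketch of that fallback chain, with stub callables standing in for `ps.test_source_processing` and `pd.alternative_dataset_processing`:

def resolve_test_data(test_dataset, test_source, test_file,
                      create_source, create_dataset):
    """Simplified resolution order: reuse what exists, create what is missing."""
    if test_source is None:
        test_source = create_source(test_file)       # upload the local test file
    if test_dataset is None:
        test_dataset = create_dataset(test_source)   # build a dataset from it
    return test_dataset

dataset = resolve_test_data(None, None, "test.csv",
                            create_source=lambda path: "source/111",
                            create_dataset=lambda source: "dataset/222")
print(dataset)  # dataset/222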
Example #53
0
def compute_output(api, args):
    """ Creates one or more anomaly detectors using the `training_set`
        or uses the ids of previously created BigML models to make
        predictions for the `test_set`.

    """

    anomaly = None
    anomalies = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    anomaly_ids = args.anomaly_ids_
    output = args.predictions
    # there's only one anomaly detector to be generated at present
    args.max_parallel_anomalies = 1
    # anomalies cannot be published yet.
    args.public_anomaly = False

    # It is compulsory to have a description to publish either datasets or
    # anomalies
    if (not args.description_ and (args.public_anomaly or
                                   args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])
    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(
        api, args, resume, source,
        csv_properties, fields, session_file, path, log)
    (_, datasets, test_dataset, resume,
     csv_properties, fields) = dataset_properties
    if args.anomaly_file:
        # anomaly is retrieved from the contents of the given local JSON file
        anomaly, csv_properties, fields = u.read_local_resource(
            args.anomaly_file,
            csv_properties=csv_properties)
        anomalies = [anomaly]
        anomaly_ids = [anomaly['resource']]
    else:
        # anomaly is retrieved from the remote object
        anomalies, anomaly_ids, resume = pa.anomalies_processing(
            datasets, anomalies, anomaly_ids, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
    if anomalies:
        anomaly = anomalies[0]

    # We update the anomaly's public state if needed
    if anomaly:
        if not a.has_test(args) and not args.anomalies_dataset:
            query_string = MINIMUM_MODEL
        elif not a.has_test(args):
            query_string = ";".join([EXCLUDE_TREES, r.ALL_FIELDS_QS])
        else:
            query_string = r.ALL_FIELDS_QS
        try:
            anomaly_id = anomaly.get('resource', anomaly)
        except AttributeError:
            anomaly_id = anomaly
        anomaly = u.check_resource(anomaly_id,
                                   query_string=query_string,
                                   api=api)
        anomalies[0] = anomaly
        if (args.public_anomaly or
                (args.shared_flag and r.shared_changed(args.shared, anomaly))):
            anomaly_args = {}
            if args.shared_flag and r.shared_changed(args.shared, anomaly):
                anomaly_args.update(shared=args.shared)
            if args.public_anomaly:
                anomaly_args.update(r.set_publish_anomaly_args(args))
            if anomaly_args:
                anomaly = r.update_anomaly(anomaly, anomaly_args, args,
                                           api=api, path=path,
                                           session_file=session_file)
                anomalies[0] = anomaly

    # We get the fields of the anomaly detector if we haven't got
    # them yet and need them
    if anomaly and (args.test_set or args.export_fields):
        fields = pa.get_anomaly_fields(anomaly, csv_properties, args)

    # If creating a top anomalies excluded/included dataset
    if args.anomalies_dataset and anomaly:
        origin_dataset = anomaly['object'].get('dataset')
        if origin_dataset is None:
            sys.exit("The dataset used to generate the anomaly detector "
                     "cannot be found. Failed to generate the anomalies "
                     " dataset.")
        local_anomaly = Anomaly(anomaly)
        include = args.anomalies_dataset == ANOMALIES_IN
        args.anomaly_filter_ = local_anomaly.anomalies_filter(include=include)
        _, resume = pd.create_new_dataset(
            origin_dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
    # If predicting
    if anomaly and args.score:
        args.test_dataset = anomaly['object']['dataset']
    if anomalies and (a.has_test(args) or (test_dataset and args.remote)):
        # test dataset can be defined by --test-split or --test-dataset or
        # --test-datasets
        if test_dataset is None:
            test_dataset = get_test_dataset(args)
        # Remote anomaly scores: scores are computed as batch anomaly scores
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, name=test_name,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args,
                    resume, session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_anomaly_score_args = r.set_batch_anomaly_score_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_anomaly_score(anomaly, test_dataset,
                                 batch_anomaly_score_args, args,
                                 api, resume, prediction_file=output,
                                 session_file=session_file, path=path, log=log)

        else:
            anomaly_score(anomalies, fields, args, session_file=session_file)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
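The `--anomalies-dataset` branch above turns the detector's top anomalies into a row filter and materializes a dataset that either keeps or drops those rows. A toy sketch of the include/exclude semantics over an in-memory row list (not the flatline filter BigML actually builds; the `ANOMALIES_IN` value is assumed):

ANOMALIES_IN = "in"  # assumed flag value marking the "keep top anomalies" mode

def split_rows(rows, top_anomaly_ids, anomalies_dataset):
    """Keep ("in") or drop ("out") the rows flagged as top anomalies."""
    include = anomalies_dataset == ANOMALIES_IN
    return [row for row in rows if (row["id"] in top_anomaly_ids) == include]

rows = [{"id": 1}, {"id": 2}, {"id": 3}]
print(split_rows(rows, {2}, "in"))   # [{'id': 2}]            keep the anomalies
print(split_rows(rows, {2}, "out"))  # [{'id': 1}, {'id': 3}]  exclude them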
Example #54
0
def analyze_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer analyze

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        # Keep the debug option if set
        debug = command_args.debug
        # Restore the args of the call to resume from the command log file
        stored_command = StoredCommand(args, COMMAND_LOG, DIRS_LOG)
        command = Command(None, stored_command=stored_command)
        # Logs the issued command and the resumed command
        session_file = os.path.join(stored_command.output_dir, SESSIONS_LOG)
        stored_command.log_command(session_file=session_file)
        # Parses resumed arguments.
        command_args = command.parser.parse_args(command.args)
        command_args.debug = debug
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir,
                                    SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear_logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        if command_args.model_fields:
            model_fields = command_args.model_fields.split(',')
            command_args.model_fields_ = [model_field.strip()
                                          for model_field in model_fields]
        else:
            command_args.model_fields_ = {}
        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
    # create the api instance from args
    api = a.get_api_instance(command_args,
                             u.check_dir(session_file))
    # --maximize flag will be deprecated. Use --optimize flag.
    if command_args.maximize is not None and command_args.optimize is None:
        command_args.optimize = command_args.maximize
    incompatible_flags = [command_args.cv, command_args.features,
                          command_args.nodes]
    if sum([int(bool(flag)) for flag in incompatible_flags]) > 1:
        sys.exit("The following flags cannot be used together:\n    --features"
                 "\n    --cross-validation\n    --nodes")
    # k-fold cross-validation
    if command_args.cv and command_args.dataset is not None:
        create_kfold_cv(command_args, api, command.common_options,
                        resume=resume)

    # features analysis
    if command_args.features:
        create_features_analysis(command_args, api, command.common_options,
                                 resume=resume)

    # node threshold analysis
    if command_args.nodes:
        create_nodes_analysis(command_args, api, command.common_options,
                              resume=resume)
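
# Usage sketch for the dispatcher above: it accepts an argv-style list, so the
# k-fold cross-validation branch can be exercised programmatically.  The
# dataset ID is a placeholder and the option spellings (--dataset,
# --cross-validation, --k-folds, --output-dir) assume BigMLer's usual names;
# BigML credentials are expected in the environment.
analyze_dispatcher(["--dataset", "dataset/5f1a2b3c4d5e6f7a8b9c0d1e",
                    "--cross-validation", "--k-folds", "5",
                    "--output-dir", "./analyze_cv"])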
Example #55
def compute_output(api, args):
    """ Creates a sample based on a `train_set`, source or dataset.

    """

    sample = None
    samples = None
    # variables from command-line options
    resume = args.resume_
    sample_ids = args.sample_ids_
    output = args.predictions
    # there's only one sample to be generated at present
    args.max_parallel_clusters = 1
    # sample cannot be published yet.
    args.public_sample = False

    # It is compulsory to have a description to publish either datasets or
    # clusters
    if (not args.description_ and (args.public_sample or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-sample step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-sample step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, _, resume, csv_properties, fields) = dataset_properties
    if args.sample_file:
        # sample is retrieved from the contents of the given local JSON file
        sample, csv_properties, fields = u.read_local_resource(
            args.sample_file, csv_properties=csv_properties)
        samples = [sample]
        sample_ids = [sample['resource']]
    else:
        # sample is retrieved from the remote object
        samples, sample_ids, resume = psa.samples_processing(
            datasets,
            samples,
            sample_ids,
            api,
            args,
            resume,
            session_file=session_file,
            path=path,
            log=log)
        if samples:
            sample = samples[0]

    # We update the sample's public state if needed
    if sample:
        if isinstance(sample, basestring):
            # build the query string from the sample options
            sample = u.check_resource(sample, api.get_sample)
        samples[0] = sample
        if (args.public_sample or
            (args.shared_flag and r.shared_changed(args.shared, sample))):
            sample_args = {}
            if args.shared_flag and r.shared_changed(args.shared, sample):
                sample_args.update(shared=args.shared)
            if args.public_sample:
                sample_args.update(r.set_publish_sample_args(args))
            if sample_args:
                sample = r.update_sample(sample,
                                         sample_args,
                                         args,
                                         api=api,
                                         path=path,
                                         session_file=session_file)
                samples[0] = sample

    # We get the fields of the sample if we haven't got
    # them yet and need them
    if sample and psa.needs_sample_fields(args):
        fields = psa.get_sample_fields(sample, csv_properties, args)

    sample_file(samples[0],
                fields,
                args,
                api,
                path=path,
                session_file=session_file)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
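
# A minimal sketch of the sample workflow that compute_output drives, written
# against the bigml bindings; the file name and the "rows" query argument are
# assumptions used only for illustration.
from bigml.api import BigML

api = BigML()
source = api.create_source("data.csv")
api.ok(source)
dataset = api.create_dataset(source)
api.ok(dataset)
sample = api.create_sample(dataset, {"name": "example sample"})
api.ok(sample)
# fetch the sample back, asking for a few rows (query arguments follow the
# samples API and are assumed here)
sample = api.get_sample(sample, query_string="rows=10")
print(sample["object"]["name"])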
Example #56
def project_dispatcher(args=sys.argv[1:]):
    """Parses command line and calls the different processing functions

    """

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = a.parse_and_check(command)
    if command_args.resume:
        command_args, session_file, _ = get_stored_command(
            args, command_args.debug, command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        directory = u.check_dir("%s/x.txt" % command_args.output_dir)
        command_args.output_dir = directory
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)

        directory = u.check_dir(os.path.join(command_args.output_dir, "tmp"))
        session_file = os.path.join(directory, SESSIONS_LOG)
        u.log_message(command.command + "\n", log_file=session_file)
        try:
            shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))
        except IOError:
            pass
        u.sys_log_message(u"%s\n" % os.path.abspath(directory),
                          log_file=DIRS_LOG)

    path = u.check_dir("%s/x.txt" % command_args.output_dir)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    # If logging is required set the file for logging
    log = None
    if command_args.log_file:
        u.check_dir(command_args.log_file)
        log = command_args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # Creates the corresponding api instance
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    a.get_output_args(api, command_args, command_args.resume)
    a.attribute_args(command_args)

    if not command_args.project_id and command_args.name:
        command_args.project = command_args.name
    if command_args.project:
        # create project
        pp.project_processing(
            api, command_args, command_args.resume, session_file=session_file,
            path=path, log=log, create=True)
    if command_args.project_id and (
            command_args.project_attributes or
            command_args.name or command_args.tag or command_args.description
            or command_args.category):
        # update project's attributes
        pp.update_project(command_args, api, command_args.resume, \
            session_file=session_file)

    u.log_message("_" * 80 + "\n", log_file=session_file)
    u.print_generated_files(command_args.output_dir, log_file=session_file,
                            verbosity=command_args.verbosity)
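
# Usage sketch for the dispatcher above: create a project by name, then update
# an existing one.  The project ID is a placeholder and the option spellings
# (--name, --project-id, --description, --output-dir) assume BigMLer's usual
# names; credentials are expected in the environment.
project_dispatcher(["--name", "My sandbox project",
                    "--output-dir", "./project_create"])
project_dispatcher(["--project-id", "project/5f1a2b3c4d5e6f7a8b9c0d1e",
                    "--description", "Scratch space for experiments",
                    "--output-dir", "./project_update"])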
Example #57
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    cluster = None
    clusters = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    cluster_ids = args.cluster_ids_
    output = args.predictions
    # there's only one cluster to be generated at present
    args.max_parallel_clusters = 1
    # clusters cannot be published yet.
    args.public_cluster = False

    # It is compulsory to have a description to publish either datasets or
    # clusters
    if (not args.description_
            and (args.public_cluster or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, test_dataset, resume, csv_properties,
     fields) = dataset_properties
    if args.cluster_file:
        # cluster is retrieved from the contents of the given local JSON file
        cluster, csv_properties, fields = u.read_local_resource(
            args.cluster_file, csv_properties=csv_properties)
        clusters = [cluster]
        cluster_ids = [cluster['resource']]
    else:
        # cluster is retrieved from the remote object
        clusters, cluster_ids, resume = pc.clusters_processing(
            datasets,
            clusters,
            cluster_ids,
            api,
            args,
            resume,
            fields=fields,
            session_file=session_file,
            path=path,
            log=log)
        if clusters:
            cluster = clusters[0]

    # We update the cluster's public state if needed
    if cluster:
        if isinstance(cluster, basestring):
            if args.cluster_datasets is None and not a.has_test(args):
                query_string = MINIMUM_MODEL
            else:
                query_string = ''
            cluster = u.check_resource(cluster,
                                       api.get_cluster,
                                       query_string=query_string)
        clusters[0] = cluster
        if (args.public_cluster or
            (args.shared_flag and r.shared_changed(args.shared, cluster))):
            cluster_args = {}
            if args.shared_flag and r.shared_changed(args.shared, cluster):
                cluster_args.update(shared=args.shared)
            if args.public_cluster:
                cluster_args.update(r.set_publish_cluster_args(args))
            if cluster_args:
                cluster = r.update_cluster(cluster,
                                           cluster_args,
                                           args,
                                           api=api,
                                           path=path,
                                           session_file=session_file)
                clusters[0] = cluster

    # We get the fields of the cluster if we haven't got
    # them yet and need them
    if cluster and (args.test_set or args.export_fields):
        if isinstance(cluster, dict):
            cluster = cluster['resource']
        cluster = u.check_resource(cluster,
                                   api.get_cluster,
                                   query_string=r.ALL_FIELDS_QS)
        fields = pc.get_cluster_fields(cluster, csv_properties, args)

    # If predicting
    if clusters and (a.has_test(args) or (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote centroids: centroids are computed as batch centroids
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api,
                    args,
                    resume,
                    name=test_name,
                    session_file=session_file,
                    path=path,
                    log=log)
                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source,
                    "test",
                    dataset_args,
                    api,
                    args,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            batch_centroid_args = r.set_batch_centroid_args(
                args, fields=fields, dataset_fields=test_fields)

            remote_centroid(cluster,
                            test_dataset,
                            batch_centroid_args,
                            args,
                            api,
                            resume,
                            prediction_file=output,
                            session_file=session_file,
                            path=path,
                            log=log)

        else:
            centroid(clusters, fields, args, session_file=session_file)

    if cluster and args.cluster_datasets is not None:
        cluster = api.check_resource(cluster)
        centroids_info = cluster['object']['clusters']['clusters']
        centroids = {
            centroid['name']: centroid['id']
            for centroid in centroids_info
        }
        cluster_datasets = cluster['object']['cluster_datasets']
        if args.cluster_datasets == '':
            centroid_ids = centroids.values()
        else:
            centroid_ids = [
                centroids[cluster_name]
                for cluster_name in args.cluster_datasets_
                if cluster_datasets.get(centroids[cluster_name], '') == ''
            ]

        for centroid_id in centroid_ids:
            dataset_args = {'centroid': centroid_id}
            r.create_dataset(cluster,
                             dataset_args,
                             args,
                             api=api,
                             path=path,
                             session_file=session_file,
                             log=log,
                             dataset_type='cluster')

    if cluster and args.cluster_models is not None:
        cluster = api.check_resource(cluster)
        centroids_info = cluster['object']['clusters']['clusters']
        centroids = {
            centroid['name']: centroid['id']
            for centroid in centroids_info
        }
        models = cluster['object']['cluster_models']
        if args.cluster_models == '':
            centroid_ids = centroids.values()
        else:
            centroid_ids = [
                centroids[cluster_name]
                for cluster_name in args.cluster_models_
                if models.get(centroids[cluster_name], '') == ''
            ]

        for centroid_id in centroid_ids:
            model_args = {'centroid': centroid_id}
            r.create_model(cluster,
                           model_args,
                           args,
                           api=api,
                           path=path,
                           session_file=session_file,
                           log=log,
                           model_type='cluster')

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
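
# The remote-centroid branch above, sketched with the bigml bindings: build a
# cluster, score a test dataset as a batch centroid, and optionally create a
# per-centroid dataset as --cluster-datasets does.  File names, the k value
# and the "000001" centroid id are placeholders.
from bigml.api import BigML

api = BigML()
source = api.create_source("train.csv")
api.ok(source)
dataset = api.create_dataset(source)
api.ok(dataset)
cluster = api.create_cluster(dataset, {"k": 5})
api.ok(cluster)

test_source = api.create_source("test.csv")
api.ok(test_source)
test_dataset = api.create_dataset(test_source)
api.ok(test_dataset)

batch_centroid = api.create_batch_centroid(cluster, test_dataset)
api.ok(batch_centroid)
api.download_batch_centroid(batch_centroid, filename="centroids.csv")

# dataset holding only the rows assigned to one centroid (assumed arg name)
centroid_dataset = api.create_dataset(cluster, {"centroid": "000001"})
api.ok(centroid_dataset)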
Example #58
def analyze_dispatcher(args=sys.argv[1:]):
    """Main processing of the parsed options for BigMLer analyze

    """

    # If --clear-logs the log files are cleared
    if "--clear-logs" in args:
        clear_log_files(LOG_FILES)

    command = command_handling(args, COMMAND_LOG)

    # Parses command line arguments.
    command_args = command.parser.parse_args(command.args)
    resume = command_args.resume
    if resume:
        command_args, session_file, _ = get_stored_command(
            args,
            command_args.debug,
            command_log=COMMAND_LOG,
            dirs_log=DIRS_LOG,
            sessions_log=SESSIONS_LOG)
    else:
        if command_args.output_dir is None:
            command_args.output_dir = a.NOW
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
        # If logging is required, open the file for logging
        log = None
        if command_args.log_file:
            u.check_dir(command_args.log_file)
            log = command_args.log_file
            # If --clear_logs the log files are cleared
            if command_args.clear_logs:
                clear_log_files([log])

        if command_args.model_fields:
            model_fields = command_args.model_fields.split(',')
            command_args.model_fields_ = [
                model_field.strip() for model_field in model_fields
            ]
        else:
            command_args.model_fields_ = {}
        u.sys_log_message(u"%s\n" % os.path.abspath(command_args.output_dir),
                          log_file=DIRS_LOG)
        session_file = os.path.join(command_args.output_dir, SESSIONS_LOG)
    # create the api instance from args
    api = a.get_api_instance(command_args, u.check_dir(session_file))
    a.transform_dataset_options(command_args, api)

    # --maximize flag will be deprecated. Use --optimize flag.
    if command_args.maximize is not None and command_args.optimize is None:
        command_args.optimize = command_args.maximize
    incompatible_flags = [
        command_args.cv, command_args.features, command_args.nodes,
        command_args.random_fields
    ]
    if sum([int(bool(flag)) for flag in incompatible_flags]) > 1:
        sys.exit("The following flags cannot be used together:\n    --features"
                 "\n    --cross-validation\n    --nodes\n    --random-fields")
    if (command_args.dataset is None and command_args.datasets is None
            and command_args.dataset_file is None):
        sys.exit("The analyze command needs an existing dataset ID. Please, "
                 "use the --dataset flag.")
    if not any(incompatible_flags):
        sys.exit("You need to specify the type of analysis: features, node "
                 "threshold, cross validation or random fields.")
    # k-fold cross-validation
    if command_args.cv and command_args.dataset is not None:
        create_kfold_cv(command_args,
                        api,
                        command.common_options,
                        resume=resume)

    # features analysis
    elif command_args.features:
        create_features_analysis(command_args,
                                 api,
                                 command.common_options,
                                 resume=resume)

    # node threshold analysis
    elif command_args.nodes:
        create_nodes_analysis(command_args,
                              api,
                              command.common_options,
                              resume=resume)

    # random fields analysis
    elif command_args.random_fields:
        create_candidates_analysis(command_args,
                                   api,
                                   command.common_options,
                                   resume=resume)
    else:
        sys.exit("You must choose one of the available analysis: --features,"
                 " --nodes, --random-fields or --cross-validation. Add"
                 " your prefered option to"
                 " the command line or type\n    bigmler analyze --help\n"
                 " to see all the available options.")
Example #59
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """

    logistic_regression = None
    logistic_regressions = None
    # no multi-label support at present

    # variables from command-line options
    resume = args.resume_
    logistic_regression_ids = args.logistic_regression_ids_
    output = args.predictions
    # there's only one logistic regression to be generated at present
    args.max_parallel_logistic_regressions = 1
    # logistic regressions cannot be published yet.
    args.public_logistic_regression = False

    # It is compulsory to have a description to publish either datasets or
    # logistic regressions
    if (not args.description_
            and (args.public_logistic_regression or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # basic pre-model step: creating or retrieving the source related info
    source, resume, csv_properties, fields = pms.get_source_info(
        api, args, resume, csv_properties, session_file, path, log)
    # basic pre-model step: creating or retrieving the dataset related info
    dataset_properties = pms.get_dataset_info(api, args, resume, source,
                                              csv_properties, fields,
                                              session_file, path, log)
    (_, datasets, test_dataset, resume, csv_properties,
     fields) = dataset_properties
    if datasets:
        # Now we have a dataset, let's check if there's an objective_field
        # given by the user and update it in the fields structure
        args.objective_id_ = get_objective_id(args, fields)
    if args.logistic_file:
        # logistic regression is retrieved from the contents of the given local
        # JSON file
        logistic_regression, csv_properties, fields = u.read_local_resource(
            args.logistic_file, csv_properties=csv_properties)
        logistic_regressions = [logistic_regression]
        logistic_regression_ids = [logistic_regression['resource']]
    else:
        # logistic regression is retrieved from the remote object
        logistic_regressions, logistic_regression_ids, resume = \
            plr.logistic_regressions_processing( \
            datasets, logistic_regressions, logistic_regression_ids, \
            api, args, resume, fields=fields, \
            session_file=session_file, path=path, log=log)
        if logistic_regressions:
            logistic_regression = logistic_regressions[0]

    # We update the logistic regression's public state if needed
    if logistic_regression:
        if isinstance(logistic_regression, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            logistic_regression = u.check_resource(logistic_regression,
                                                   api.get_logistic_regression,
                                                   query_string=query_string)
        logistic_regressions[0] = logistic_regression
        if (args.public_logistic_regression
                or (args.shared_flag
                    and r.shared_changed(args.shared, logistic_regression))):
            logistic_regression_args = {}
            if args.shared_flag and r.shared_changed(args.shared,
                                                     logistic_regression):
                logistic_regression_args.update(shared=args.shared)
            if args.public_logistic_regression:
                logistic_regression_args.update( \
                    r.set_publish_logistic_regression_args(args))
            if logistic_regression_args:
                logistic_regression = r.update_logistic_regression( \
                    logistic_regression, logistic_regression_args, args,
                    api=api, path=path, \
                    session_file=session_file)
                logistic_regressions[0] = logistic_regression

    # We get the fields of the logistic_regression if we haven't got
    # them yet and need them
    if logistic_regression and (args.test_set or args.export_fields):
        fields = plr.get_logistic_fields( \
            logistic_regression, csv_properties, args)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    # If predicting
    if logistic_regressions and (a.has_test(args) or \
            (test_dataset and args.remote)):
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api,
                    args,
                    resume,
                    name=test_name,
                    session_file=session_file,
                    path=path,
                    log=log)
                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source,
                    "test",
                    dataset_args,
                    api,
                    args,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            batch_prediction_args = r.set_batch_prediction_args(
                args, fields=fields, dataset_fields=test_fields)

            remote_lr_prediction(logistic_regression, test_dataset, \
                batch_prediction_args, args, \
                api, resume, prediction_file=output, \
                session_file=session_file, path=path, log=log)

        else:
            lr_prediction(logistic_regressions,
                          fields,
                          args,
                          session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
        if args.dataset_off and not args.has_test_datasets_:
            args.test_dataset_ids = datasets
        if args.test_dataset_ids and args.dataset_off:
            # Evaluate the models with the corresponding test datasets.
            test_dataset_id = bigml.api.get_dataset_id( \
                args.test_dataset_ids[0])
            test_dataset = api.check_resource(test_dataset_id)
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            resume = evaluate(logistic_regressions,
                              args.test_dataset_ids,
                              api,
                              args,
                              resume,
                              fields=fields,
                              dataset_fields=test_fields,
                              session_file=session_file,
                              path=path,
                              log=log,
                              objective_field=args.objective_field)
        else:
            dataset = datasets[0]
            if args.test_split > 0 or args.has_test_datasets_:
                dataset = test_dataset
            dataset = u.check_resource(dataset,
                                       api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate(logistic_regressions, [dataset],
                              api,
                              args,
                              resume,
                              fields=fields,
                              dataset_fields=dataset_fields,
                              session_file=session_file,
                              path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
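
# The remote-prediction and evaluation branches above, sketched with the bigml
# bindings; the training/test files are placeholders and the objective field
# is left to its default (the last field of the dataset).
from bigml.api import BigML

api = BigML()
source = api.create_source("train.csv")
api.ok(source)
dataset = api.create_dataset(source)
api.ok(dataset)
logistic_regression = api.create_logistic_regression(dataset)
api.ok(logistic_regression)

test_source = api.create_source("test.csv")
api.ok(test_source)
test_dataset = api.create_dataset(test_source)
api.ok(test_dataset)

# batch prediction, as remote_lr_prediction does for --remote
batch_prediction = api.create_batch_prediction(logistic_regression,
                                               test_dataset)
api.ok(batch_prediction)
api.download_batch_prediction(batch_prediction, filename="predictions.csv")

# evaluation against the test dataset, as the --evaluate branch does
evaluation = api.create_evaluation(logistic_regression, test_dataset)
api.ok(evaluation)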
Example #60
def compute_output(api, args):
    """ Creates a fusion using the `models` list or uses the ids
    of a previously created BigML fusion to make predictions for the `test_set`.

    """

    fusion = None

    # variables from command-line options
    resume = args.resume_
    fusion_ids = args.fusion_ids_
    output = args.predictions
    # there's only one fusion to be generated at present
    args.max_parallel_fusions = 1
    # fusion cannot be published yet.
    args.public_fusion = False

    # It is compulsory to have a description to publish either datasets or
    # fusions
    if (not args.description_ and args.public_fusion):
        sys.exit("You should provide a description to publish.")

    path = u.check_dir(output)
    session_file = u"%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    if args.fusion_file:
        # fusion is retrieved from the contents of the given local
        # JSON file
        fusion, csv_properties, fields = u.read_local_resource(
            args.fusion_file, csv_properties=csv_properties)
        fusion_ids = [fusion]
    else:
        # fusion is retrieved from the remote object or created
        fusion, resume = \
            pf.fusion_processing( \
            fusion, fusion_ids, \
            api, args, resume, \
            session_file=session_file, path=path, log=log)

    # We update the fusion public state if needed
    if fusion:
        if isinstance(fusion, basestring):
            if not a.has_test(args):
                query_string = MINIMUM_MODEL
            elif args.export_fields:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = ''
            fusion = u.check_resource(fusion,
                                      api.get_fusion,
                                      query_string=query_string)
        if (args.public_fusion or
            (args.shared_flag and r.shared_changed(args.shared, fusion))):
            fusion_args = {}
            if args.shared_flag and r.shared_changed(args.shared, fusion):
                fusion_args.update(shared=args.shared)
            if args.public_fusion:
                fusion_args.update( \
                    r.set_publish_fusion_args(args))
            if fusion_args:
                fusion = r.update_fusion( \
                    fusion, fusion_args, args,
                    api=api, path=path, \
                    session_file=session_file)

    # We get the fields of the fusion if we haven't got
    # them yet and need them
    if fusion and (args.test_set or args.evaluate):
        fields = pf.get_fusion_fields( \
            fusion, csv_properties, args)

    # If predicting
    if fusion and (a.has_test(args) or \
            args.remote):
        test_dataset = get_test_dataset(args)

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on
        if args.remote and not args.no_batch:
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api,
                    args,
                    resume,
                    name=test_name,
                    session_file=session_file,
                    path=path,
                    log=log)
                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source,
                    "test",
                    dataset_args,
                    api,
                    args,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset, csv_properties)
            if not args.evaluate:
                batch_prediction_args = r.set_batch_prediction_args(
                    args, fields=fields, dataset_fields=test_fields)

                remote_prediction(fusion, test_dataset, \
                    batch_prediction_args, args, \
                    api, resume, prediction_file=output, \
                    session_file=session_file, path=path, log=log)

        else:
            prediction([fusion], fields, args, session_file=session_file)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        # When we resume evaluation and models were already completed, we
        # should use the datasets array as test datasets
        args.max_parallel_evaluations = 1  # only one evaluation at present
        args.cross_validation_rate = 0  # no cross-validation
        args.number_of_evaluations = 1  # only one evaluation
        if args.has_test_datasets_:
            test_dataset = get_test_dataset(args)
            dataset = test_dataset
            dataset = u.check_resource(dataset,
                                       api=api,
                                       query_string=r.ALL_FIELDS_QS)
            dataset_fields = pd.get_fields_structure(dataset, None)
            resume = evaluate([fusion], [dataset],
                              api,
                              args,
                              resume,
                              fields=fields,
                              dataset_fields=dataset_fields,
                              session_file=session_file,
                              path=path,
                              log=log,
                              objective_field=args.objective_field)

    u.print_generated_files(path,
                            log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
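
# The fusion flow above, sketched with the bigml bindings: a fusion is built
# from previously created supervised models and then used for a remote batch
# prediction.  The component model IDs and file name are placeholders.
from bigml.api import BigML

api = BigML()
fusion = api.create_fusion(["model/5f1a2b3c4d5e6f7a8b9c0d1e",
                            "logisticregression/5f1a2b3c4d5e6f7a8b9c0d2f"])
api.ok(fusion)

test_source = api.create_source("test.csv")
api.ok(test_source)
test_dataset = api.create_dataset(test_source)
api.ok(test_dataset)

batch_prediction = api.create_batch_prediction(fusion, test_dataset)
api.ok(batch_prediction)
api.download_batch_prediction(batch_prediction,
                              filename="fusion_predictions.csv")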