Example #1
File: bigmler.py  Project: arnaudsj/bigmler
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = map(str.strip,
                          command_args.delete_list.split(','))
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend([line for line
                            in open(command_args.delete_file, "r")])

    resource_selectors = [
        (command_args.source_tag, api.list_sources),
        (command_args.dataset_tag, api.list_datasets),
        (command_args.model_tag, api.list_models),
        (command_args.prediction_tag, api.list_predictions),
        (command_args.evaluation_tag, api.list_evaluations),
        (command_args.ensemble_tag, api.list_ensembles),
        (command_args.batch_prediction_tag, api.list_batch_predictions)]

    for selector, api_call in resource_selectors:
        query_string = None
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
        elif selector:
            query_string = "tags__in=%s" % selector
        if query_string:
            delete_list.extend(u.list_ids(api_call, query_string))

    message = u.dated("Deleting objects.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
Example #2
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = map(str.strip,
                          command_args.delete_list.split(','))
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend([line for line
                            in open(command_args.delete_file, "r")])

    resource_selectors = [
        ("source", command_args.source_tag, api.list_sources),
        ("dataset", command_args.dataset_tag, api.list_datasets),
        ("model", command_args.model_tag, api.list_models),
        ("prediction", command_args.prediction_tag, api.list_predictions),
        ("ensemble", command_args.ensemble_tag, api.list_ensembles),
        ("evaluation", command_args.evaluation_tag, api.list_evaluations),
        ("batchprediction", command_args.batch_prediction_tag,
         api.list_batch_predictions)]

    query_string = None
    if command_args.older_than:
        date_str = get_date(command_args.older_than, api)
        if date_str:
            query_string = "created__lt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if command_args.newer_than:
        date_str = get_date(command_args.newer_than, api)
        if date_str:
            if query_string is None:
                query_string = ""
            else:
                query_string += ";"
            query_string += "created__gt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if (any([selector[1] is not None for selector in resource_selectors]) or
            command_args.all_tag):
        if query_string is None:
            query_string = ""
        else:
            query_string += ";"
        query_value = command_args.all_tag
        for label, selector, api_call in resource_selectors:
            combined_query = query_string
            if not query_value and selector:
                query_value = selector
            if command_args.all_tag or selector:
                combined_query += "tags__in=%s" % query_value
                if label == "model":
                    # avoid ensemble's models
                    combined_query += ";ensemble=false"
                delete_list.extend(u.list_ids(api_call, combined_query))
    else:
        if query_string:
            for label, selector, api_call in resource_selectors:
                combined_query = query_string
                if label == "model":
                    # avoid ensemble's models
                    combined_query += ";ensemble=false"
                delete_list.extend(u.list_ids(api_call, combined_query))

    message = u.dated("Deleting objects.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
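
Note: example #2 chains the created__lt, created__gt, and tags__in filters
with ";" separators before calling u.list_ids. A standalone sketch of that
composition (assuming only the query syntax visible above):

def build_query(older_than=None, newer_than=None, tag=None):
    """Join the optional filters with ';' as delete_resources does."""
    parts = []
    if older_than:
        parts.append("created__lt=%s" % older_than)
    if newer_than:
        parts.append("created__gt=%s" % newer_than)
    if tag:
        parts.append("tags__in=%s" % tag)
    return ";".join(parts)

print(build_query(older_than="2013-01-01", tag="BigMLer"))
# created__lt=2013-01-01;tags__in=BigMLer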
Example #3
                              session_file=session_file, path=path,
                              log=log, labels=labels, all_labels=all_labels,
                              objective_field=objective_field)

    # If cross_validation_rate is > 0, create remote evaluations and save
    # results in json and human-readable format. Then average the results to
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models, dataset, fields, api, args, resume,
                       name=name, description=description,
                       fields_map=fields_map, session_file=session_file,
                       path=path, log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, u" "), "utf-8") + u"\n")
    else:
        message = (u"\nGenerated files:\n\n" + 
                   u.print_tree(path, u" ") + u"\n")
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
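
Note: a quick check of the sampling arithmetic in the cross-validation step
above; the per-model sample rate is simply the complement of the held-out
rate (the values here are illustrative).

cross_validation_rate = 0.1
sample_rate = 1 - cross_validation_rate   # 90% of the data trains each model
assert sample_rate == 0.9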
Example #4
def main(args=sys.argv[1:]):
    """Main process

    """
    train_stdin = False
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            args[i] = args[i].replace("_", "-")
            if (args[i] == '--train'
                    and (i == len(args) - 1 or args[i + 1].startswith("--"))):
                train_stdin = True

    # If --clear-logs is set, the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        if ' ' in args[i]:
            literal_args[i] = '"%s"' % args[i]
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False

    parser = create_parser(defaults=get_user_defaults(),
                           constants={
                               'NOW': NOW,
                               'MAX_MODELS': MAX_MODELS,
                               'PLURALITY': PLURALITY
                           })

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    if command_args.cross_validation_rate > 0 and (command_args.test_set
                                                   or command_args.evaluate
                                                   or command_args.model
                                                   or command_args.models
                                                   or command_args.model_tag):
        parser.error("Non compatible flags: --cross-validation-rate"
                     " cannot be used with --evaluate, --model,"
                     " --models or --model-tag. Usage:\n\n"
                     "bigmler --train data/iris.csv "
                     "--cross-validation-rate 0.1")

    default_output = ('evaluation'
                      if command_args.evaluate else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG, command_args.stack_level)
        args = shlex.split(command)[1:]
        try:
            position = args.index("--train")
            if (position == (len(args) - 1)
                    or args[position + 1].startswith("--")):
                train_stdin = True
        except ValueError:
            pass
        output_dir = u.get_log_reversed(DIRS_LOG, command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        parser = create_parser(defaults=get_user_defaults(defaults_file),
                               constants={
                                   'NOW': NOW,
                                   'MAX_MODELS': MAX_MODELS,
                                   'PLURALITY': PLURALITY
                               })
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep, default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            defaults_handler = open(defaults_file, 'r')
            contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
            defaults_handler.close()
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep, default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = (
                "%s%s%s" % (NOW, os.sep, command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

    if train_stdin:
        command_args.training_set = StringIO.StringIO(sys.stdin.read())

    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug
    }

    if command_args.store:
        api_command_args.update({'storage': u.check_dir(session_file)})

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
            and not (command_args.training_set or command_args.source
                     or command_args.dataset)
            and not (command_args.test_set and
                     (command_args.model or command_args.models
                      or command_args.model_tag or command_args.ensemble))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate\n"
                     "bigmler --ensemble ensemble/5081d067035d076151003443"
                     " --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            pass

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (u.read_field_attributes(
            command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(lambda x: x.strip(),
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(lambda x: x.strip(),
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_models(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids + u.list_ids(
            api.list_models, "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method
            and command_args.method not in COMBINATION_WEIGHTS.keys()):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                 for key, value in COMBINER_MAP.items()])
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(lambda x: x.strip(), command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # Parses resource ids if provided.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
        delete_list = []
        if command_args.delete_list:
            delete_list = map(lambda x: x.strip(),
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                raise Exception("File %s not found" % command_args.delete_file)
            delete_list.extend(
                [line for line in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve sources/ids if provided
        if command_args.source_tag:
            query_string = "tags__in=%s" % command_args.source_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
        # Retrieve datasets/ids if provided
        if command_args.dataset_tag:
            query_string = "tags__in=%s" % command_args.dataset_tag
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
        # Retrieve model/ids if provided
        if command_args.model_tag:
            query_string = "tags__in=%s" % command_args.model_tag
            delete_list.extend(u.list_ids(api.list_models, query_string))
        # Retrieve prediction/ids if provided
        if command_args.prediction_tag:
            query_string = "tags__in=%s" % command_args.prediction_tag
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
        # Retrieve evaluation/ids if provided
        if command_args.evaluation_tag:
            query_string = "tags__in=%s" % command_args.evaluation_tag
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve ensembles/ids if provided
        if command_args.ensemble_tag:
            query_string = "tags__in=%s" % command_args.ensemble_tag
            delete_list.extend(u.list_ids(api.list_ensembles, query_string))
        message = u.dated("Deleting objects.\n")
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message,
                      log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #5
                       fields_map,
                       api,
                       args,
                       resume,
                       session_file=session_file,
                       path=path,
                       log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)


def main(args=sys.argv[1:]):
    """Main process

    """
    train_stdin = False
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            args[i] = args[i].replace("_", "-")
            if (args[i] == '--train'
                    and (i == len(args) - 1 or args[i + 1].startswith("--"))):
Example #6
File: bigmler.py  Project: arnaudsj/bigmler
    # issue a cross_validation measure set.
    if args.cross_validation_rate > 0:
        args.sample_rate = 1 - args.cross_validation_rate
        cross_validate(models, dataset, fields, api, args, resume,
                       name=name, description=description,
                       fields_map=fields_map, session_file=session_file,
                       path=path, log=log)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)


def main(args=sys.argv[1:]):
    """Main process

    """
    (flags, train_stdin, test_stdin) = a.get_flags(args)

    # If --clear-logs is set, the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
Example #7
def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options

    """
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = map(str.strip, command_args.delete_list.split(','))
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend(
            [line for line in open(command_args.delete_file, "r")])

    resource_selectors = [(command_args.source_tag, api.list_sources),
                          (command_args.dataset_tag, api.list_datasets),
                          (command_args.model_tag, api.list_models),
                          (command_args.prediction_tag, api.list_predictions),
                          (command_args.evaluation_tag, api.list_evaluations),
                          (command_args.ensemble_tag, api.list_ensembles),
                          (command_args.batch_prediction_tag,
                           api.list_batch_predictions)]

    query_string = None
    if command_args.older_than:
        date_str = get_date(command_args.older_than, api)
        if date_str:
            query_string = "created__lt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if command_args.newer_than:
        date_str = get_date(command_args.newer_than, api)
        if date_str:
            if query_string is None:
                query_string = ""
            else:
                query_string += ";"
            query_string += "created__gt=%s" % date_str
        else:
            sys.exit("The --older-than and --newer-than flags only accept "
                     "integers (number of days), dates in YYYY-MM-DD format "
                     " and resource ids. Please, double-check your input.")

    if (any([selector[0] is not None for selector in resource_selectors])
            or command_args.all_tag):
        if query_string is None:
            query_string = ""
        else:
            query_string += ";"
        for selector, api_call in resource_selectors:
            combined_query = query_string
            if command_args.all_tag:
                combined_query += "tags__in=%s" % command_args.all_tag
                delete_list.extend(u.list_ids(api_call, combined_query))
            elif selector:
                combined_query += "tags__in=%s" % selector
                delete_list.extend(u.list_ids(api_call, combined_query))
    else:
        if query_string:
            for selector, api_call in resource_selectors:
                delete_list.extend(u.list_ids(api_call, query_string))

    message = u.dated("Deleting objects.\n")
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message,
                  log_file=session_file,
                  console=command_args.verbosity)
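
Note: judging from the error message above, get_date accepts an integer
number of days, a date in YYYY-MM-DD format, or a resource id. A hedged toy
version covering the first two forms (the real helper lives in bigmler and
may differ; resolving a resource id would need an api lookup):

import datetime

def get_date(reference, api=None):
    """Toy stand-in: a days-ago integer or a literal YYYY-MM-DD date."""
    try:
        days = int(reference)
        then = datetime.datetime.utcnow() - datetime.timedelta(days=days)
        return then.strftime("%Y-%m-%d")
    except ValueError:
        pass
    try:
        datetime.datetime.strptime(reference, "%Y-%m-%d")
        return reference
    except ValueError:
        return None  # a resource id would be resolved through the api here

print(get_date("2012-01-01"))   # 2012-01-01
print(get_date("7"))            # the date one week ago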
Example #8
def main(args=sys.argv[1:]):
    """Main process

    """
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            args[i] = args[i].replace("_", "-")
    # If --clear-logs is set, the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        if ' ' in args[i]:
            literal_args[i] = '"%s"' % args[i]
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False

    parser = create_parser(defaults=get_user_defaults(), constants={'NOW': NOW,
                           'MAX_MODELS': MAX_MODELS, 'PLURALITY': PLURALITY})

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    default_output = ('evaluation' if command_args.evaluate
                      else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG,
                                     command_args.stack_level)
        args = shlex.split(command)[1:]
        output_dir = u.get_log_reversed(DIRS_LOG,
                                        command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        parser = create_parser(defaults=get_user_defaults(defaults_file),
                               constants={'NOW': NOW, 'MAX_MODELS': MAX_MODELS,
                                          'PLURALITY': PLURALITY})
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep,
                                         default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            defaults_handler = open(defaults_file, 'r')
            contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
            defaults_handler.close()
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug}

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
        and not (command_args.training_set or command_args.source
                 or command_args.dataset)
        and not (command_args.test_set and (command_args.model or
                 command_args.models or command_args.model_tag))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            pass

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(lambda x: x.strip(),
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(lambda x: x.strip(),
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_models(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids +
                     u.list_ids(api.list_models,
                                "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method and
            command_args.method not in COMBINATION_WEIGHTS.keys()):
        command_args.method = 0
    else:
        combiner_methods = dict([[value, key]
                                for key, value in COMBINER_MAP.items()])
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(lambda x: x.strip(), command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # Parses resource ids if provided.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        delete_list = []
        if command_args.delete_list:
            delete_list = map(lambda x: x.strip(),
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                raise Exception("File %s not found" % command_args.delete_file)
            delete_list.extend([line for line
                                in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve sources/ids if provided
        if command_args.source_tag:
            query_string = "tags__in=%s" % command_args.source_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
        # Retrieve datasets/ids if provided
        if command_args.dataset_tag:
            query_string = "tags__in=%s" % command_args.dataset_tag
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
        # Retrieve model/ids if provided
        if command_args.model_tag:
            query_string = "tags__in=%s" % command_args.model_tag
            delete_list.extend(u.list_ids(api.list_models, query_string))
        # Retrieve prediction/ids if provided
        if command_args.prediction_tag:
            query_string = "tags__in=%s" % command_args.prediction_tag
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
        # Retrieve evaluation/ids if provided
        if command_args.evaluation_tag:
            query_string = "tags__in=%s" % command_args.evaluation_tag
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        message = u.dated("Deleting objects.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)
Example #9
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None, training_set_header=True,
                   test_set_header=True, model_ids=None,
                   votes_files=None, resume=False, fields_map=None):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear-logs is set, the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    if (training_set or (args.evaluate and test_set)):
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If no previous source, dataset, or model is provided,
    # we create a new one. Also, if --evaluate and test data are provided,
    # we create a new dataset to test with.
    data_set = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    if data_set is not None:

        source_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag,
            "source_parser": {"header": data_set_header}}
        message = u.dated("Creating source.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        source = api.create_source(data_set, source_args,
                                   progress_bar=args.progress_bar)
        source = api.check_resource(source, api.get_source)
        message = u.dated("Source created: %s\n" % u.get_url(source, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        u.log_message("%s\n" % source['resource'], log_file=log)

        fields = Fields(source['object']['fields'],
                        source['object']['source_parser']['missing_tokens'],
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()

    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.check_resource(source, api.get_source)
        csv_properties = {'missing_tokens':
                          source['object']['source_parser']['missing_tokens'],
                          'data_locale':
                          source['object']['source_parser']['locale']}

        fields = Fields(source['object']['fields'], **csv_properties)
        update_fields = {}
        if field_attributes:
            for (column, value) in field_attributes.iteritems():
                update_fields.update({
                    fields.field_id(column): value})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

        update_fields = {}
        if types:
            for (column, value) in types.iteritems():
                update_fields.update({
                    fields.field_id(column): {'optype': value}})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

    if (training_set or args.source or (args.evaluate and test_set)):
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                bigml.api,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
    # If we have a source but no dataset or model has been provided, we
    # create a new dataset unless the no_dataset option is set. We also
    # do so if evaluate is set and a test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids and
            not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }

        if args.json_filter:
            dataset_args.update(json_filter=args.json_filter)
        elif args.lisp_filter:
            dataset_args.update(lisp_filter=args.lisp_filter)

        input_fields = []
        if dataset_fields:
            # A distinct loop variable avoids shadowing the `name` argument,
            # which is reused below when building model_args.
            for field_name in dataset_fields:
                input_fields.append(fields.field_id(field_name))
            dataset_args.update(input_fields=input_fields)
        message = u.dated("Creating dataset.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        dataset = api.create_dataset(source, dataset_args)
        dataset = api.check_resource(dataset, api.get_dataset)
        message = u.dated("Dataset created: %s\n" % u.get_url(dataset, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        u.log_message("%s\n" % dataset['resource'], log_file=log)
        dataset_file = open(path + '/dataset', 'w', 0)
        dataset_file.write("%s\n" % dataset['resource'])
        dataset_file.flush()
        dataset_file.close()

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        message = u.dated("Retrieving dataset. %s\n" %
                          u.get_url(args.dataset, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        dataset = api.get_dataset(args.dataset)

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        if dataset['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.check_resource(dataset, api.get_dataset)
        if not csv_properties:
            csv_properties = {'data_locale':
                              dataset['object']['locale']}
        if args.public_dataset:
            if not description:
                raise Exception("You should provide a description to publish.")
            public_dataset = {"private": False}
            if args.dataset_price:
                message = u.dated("Updating dataset. %s\n" %
                                  u.get_url(dataset, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_dataset.update(price=args.dataset_price)
            message = u.dated("Updating dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.update_dataset(dataset, public_dataset)
        fields = Fields(dataset['object']['fields'], **csv_properties)

    # If we have a dataset but not a model, we create the model unless the
    # no_model flag has been set.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }
        if objective_field is not None:
            model_args.update({"objective_field":
                               fields.field_id(objective_field)})
        # If evaluate flag is on, we choose a deterministic sampling with 80%
        # of the data to create the model
        if args.evaluate:
            if args.sample_rate == 1:
                args.sample_rate = EVALUATE_SAMPLE_RATE
            seed = SEED
            model_args.update(seed=seed)

        input_fields = []
        if model_fields:
            for field_name in model_fields:
                input_fields.append(fields.field_id(field_name))
            model_args.update(input_fields=input_fields)

        if args.pruning and args.pruning != 'smart':
            model_args.update(stat_pruning=(args.pruning == 'statistical'))

        model_args.update(sample_rate=args.sample_rate,
                          replacement=args.replacement,
                          randomize=args.randomize)
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids),
                                   args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            models = model_ids
            args.number_of_models -= len(model_ids)

        model_file = open(path + '/models', 'w', 0)
        for model_id in model_ids:
            model_file.write("%s\n" % model_id)
        last_model = None
        if args.number_of_models > 0:
            message = u.dated("Creating %s.\n" %
                              u.plural("model", args.number_of_models))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            for i in range(1, args.number_of_models + 1):
                if i > args.max_parallel_models:
                    api.check_resource(last_model, api.get_model)
                model = api.create_model(dataset, model_args)
                u.log_message("%s\n" % model['resource'], log_file=log)
                last_model = model
                model_ids.append(model['resource'])
                models.append(model)
                model_file.write("%s\n" % model['resource'])
                model_file.flush()
            if args.number_of_models < 2 and args.verbosity:
                if model['object']['status']['code'] != bigml.api.FINISHED:
                    model = api.check_resource(model, api.get_model)
                    models[0] = model
                message = u.dated("Model created: %s.\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        model_file.close()

    # If a model is provided, we retrieve it.
    elif args.model:
        message = u.dated("Retrieving model. %s\n" %
                          u.get_url(args.model, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        model = api.get_model(args.model)

    elif args.models or args.model_tag:
        models = model_ids[:]

    if model_ids and test_set and not args.evaluate:
        model_id = ""
        if len(model_ids) == 1:
            model_id = model_ids[0]
        message = u.dated("Retrieving %s. %s\n" %
                          (u.plural("model", len(model_ids)),
                           u.get_url(model_id, api)))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        if len(model_ids) < args.max_batch_models:
            models = []
            for model in model_ids:
                model = api.check_resource(model, api.get_model)
                models.append(model)
            model = models[0]
        else:
            model = api.check_resource(model_ids[0], api.get_model)
            models[0] = model

    # We check that the model is finished and get the fields if we haven't
    # got them yet.
    if model and not args.evaluate and (test_set or args.black_box
                                        or args.white_box):
        if model['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving model. %s\n" %
                              u.get_url(model, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            model = api.check_resource(model, api.get_model)
        if args.black_box:
            if not description:
                raise Exception("You should provide a description to publish.")
            model = api.update_model(model, {"private": False})
        if args.white_box:
            if not description:
                raise Exception("You should provide a description to publish.")
            public_model = {"private": False, "white_box": True}
            if args.model_price:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(price=args.model_price)
            if args.cpp:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(credits_per_prediction=args.cpp)
            model = api.update_model(model, public_model)
        if not csv_properties:
            csv_properties = {'data_locale':
                              model['object']['locale']}
        csv_properties.update(verbose=True)
        if args.user_locale:
            csv_properties.update(data_locale=args.user_locale)

        fields = Fields(model['object']['model']['fields'], **csv_properties)

    if model and not models:
        models = [model]

    if models and test_set and not args.evaluate:
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When the combine_votes flag is used, retrieve the prediction files saved
    # in the comma-separated list of directories and combine them.
    if votes_files:
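        # The model id is recovered from the first votes file name, e.g.
        # "model_<24 hex chars>__predictions.csv" -> "model/<24 hex chars>".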
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        model = api.check_resource(model_id, api.get_model)
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)

    # If the evaluate flag is on, create a remote evaluation and save the
    # results in JSON and human-readable formats.
    if args.evaluate:
        if resume:
            resume, evaluation = u.checkpoint(u.is_evaluation_created, path,
                                              bigml.api,
                                              debug=args.debug)
            if not resume:
                message = u.dated("Evaluation not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        if not resume:
            evaluation_file = open(path + '/evaluation', 'w', 0)
            evaluation_args = {
                "name": name,
                "description": description,
                "tags": args.tag
            }
            if fields_map is not None:
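                # Translate the columns on both sides of the map into their
                # BigML field ids.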
                update_map = {}
                for (dataset_column, model_column) in fields_map.iteritems():
                    update_map.update({
                        fields.field_id(dataset_column):
                        fields.field_id(model_column)})
                evaluation_args.update({"fields_map": update_map})
            if not ((args.dataset or args.test_set)
                    and (args.model or args.models or args.model_tag)):
                evaluation_args.update(out_of_bag=True, seed=SEED,
                                       sample_rate=args.sample_rate)
            message = u.dated("Creating evaluation.\n")
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            evaluation = api.create_evaluation(model, dataset, evaluation_args)
            u.log_message("%s\n" % evaluation['resource'], log_file=log)
            evaluation_file.write("%s\n" % evaluation['resource'])
            evaluation_file.flush()
            evaluation_file.close()
        message = u.dated("Retrieving evaluation. %s\n" %
                          u.get_url(evaluation, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        evaluation = api.check_resource(evaluation, api.get_evaluation)
        evaluation_json = open(output + '.json', 'w', 0)
        evaluation_json.write(json.dumps(evaluation['object']['result']))
        evaluation_json.flush()
        evaluation_json.close()
        evaluation_txt = open(output + '.txt', 'w', 0)
        api.pprint(evaluation['object']['result'],
                   evaluation_txt)
        evaluation_txt.flush()
        evaluation_txt.close()

    # Workaround to restore the Windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
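
The fields_map handling above turns the columns on both sides of the map into BigML field ids before the evaluation is created. A minimal standalone sketch of that translation, using a hypothetical name-to-id lookup in place of Fields.field_id:

field_ids = {"petal length": "000002", "species": "000004"}  # hypothetical

def field_id(column):
    """Stand-in for Fields.field_id: resolve a column to its field id."""
    return field_ids[column]

fields_map = {"petal length": "petal length", "species": "species"}
update_map = {}
for (dataset_column, model_column) in fields_map.iteritems():
    update_map.update({field_id(dataset_column): field_id(model_column)})
# update_map == {"000002": "000002", "000004": "000004"}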
Example #10
0
def main(args=sys.argv[1:]):
    """Main process

    """
    train_stdin = False
    test_stdin = False
    flags = []
    for i in range(0, len(args)):
        if args[i].startswith("--"):
            flag = args[i]
            # syntax --flag=value
            if "=" in flag:
                flag = args[i][0: flag.index("=")]
            flag = flag.replace("_", "-")
            flags.append(flag)
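            # A bare --train/--test (last argument, or followed by another
            # flag) means the corresponding set is read from stdin.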
            if (flag == '--train' and
                    (i == len(args) - 1 or args[i + 1].startswith("--"))):
                train_stdin = True
            elif (flag == '--test' and
                    (i == len(args) - 1 or args[i + 1].startswith("--"))):
                test_stdin = True

    # If --clear-logs is set, the log files are cleared
    if "--clear-logs" in args:
        for log_file in LOG_FILES:
            try:
                open(log_file, 'w', 0).close()
            except IOError:
                pass
    literal_args = args[:]
    for i in range(0, len(args)):
        # quoting literals with blanks: 'petal length'
        if ' ' in args[i]:
            prefix = ""
            literal = args[i]
            # literals with blanks after "+" or "-": +'petal length'
            if args[i][0] in r.ADD_REMOVE_PREFIX:
                prefix = args[i][0]
                literal = args[i][1:]
            literal_args[i] = '%s"%s"' % (prefix, literal)
    message = "bigmler %s\n" % " ".join(literal_args)

    # Resume calls are not logged
    if not "--resume" in args:
        with open(COMMAND_LOG, "a", 0) as command_log:
            command_log.write(message)
        resume = False
    user_defaults = get_user_defaults()
    parser = create_parser(defaults=user_defaults,
                           constants={'NOW': NOW,
                                      'MAX_MODELS': MAX_MODELS,
                                      'PLURALITY': PLURALITY})

    # Parses command line arguments.
    command_args = parser.parse_args(args)

    if command_args.cross_validation_rate > 0 and (
            non_compatible(command_args, '--cross-validation-rate')):
        parser.error("Non compatible flags: --cross-validation-rate"
                     " cannot be used with --evaluate, --model,"
                     " --models or --model-tag. Usage:\n\n"
                     "bigmler --train data/iris.csv "
                     "--cross-validation-rate 0.1")

    if train_stdin and command_args.multi_label:
        parser.error("Reading multi-label training sets from stream "
                     "is not yet available.")

    if test_stdin and command_args.resume:
        parser.error("Can't resume when using stream reading test sets.")

    default_output = ('evaluation' if command_args.evaluate
                      else 'predictions.csv')
    if command_args.resume:
        debug = command_args.debug
        command = u.get_log_reversed(COMMAND_LOG,
                                     command_args.stack_level)
        args = shlex.split(command)[1:]
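        # The stored command line starts with "bigmler", which is dropped
        # above; the bare --train/--test detection is then redone on the
        # resumed arguments.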
        try:
            position = args.index("--train")
            train_stdin = (position == (len(args) - 1) or
                           args[position + 1].startswith("--"))
        except ValueError:
            pass
        try:
            position = args.index("--test")
            test_stdin = (position == (len(args) - 1) or
                          args[position + 1].startswith("--"))
        except ValueError:
            pass
        output_dir = u.get_log_reversed(DIRS_LOG,
                                        command_args.stack_level)
        defaults_file = "%s%s%s" % (output_dir, os.sep, DEFAULTS_FILE)
        user_defaults = get_user_defaults(defaults_file)
        parser = create_parser(defaults=user_defaults,
                               constants={'NOW': NOW,
                                          'MAX_MODELS': MAX_MODELS,
                                          'PLURALITY': PLURALITY})
        command_args = parser.parse_args(args)
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (output_dir, os.sep,
                                         default_output))
        # Logs the issued command and the resumed command
        session_file = "%s%s%s" % (output_dir, os.sep, SESSIONS_LOG)
        u.log_message(message, log_file=session_file)
        message = "\nResuming command:\n%s\n\n" % command
        u.log_message(message, log_file=session_file, console=True)
        try:
            defaults_handler = open(defaults_file, 'r')
            contents = defaults_handler.read()
            message = "\nUsing the following defaults:\n%s\n\n" % contents
            u.log_message(message, log_file=session_file, console=True)
            defaults_handler.close()
        except IOError:
            pass

        resume = True
    else:
        if command_args.predictions is None:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         default_output))
        if len(os.path.dirname(command_args.predictions).strip()) == 0:
            command_args.predictions = ("%s%s%s" %
                                        (NOW, os.sep,
                                         command_args.predictions))
        directory = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (directory, os.sep, SESSIONS_LOG)
        u.log_message(message + "\n", log_file=session_file)
        try:
            defaults_file = open(DEFAULTS_FILE, 'r')
            contents = defaults_file.read()
            defaults_file.close()
            defaults_copy = open("%s%s%s" % (directory, os.sep, DEFAULTS_FILE),
                                 'w', 0)
            defaults_copy.write(contents)
            defaults_copy.close()
        except IOError:
            pass
        with open(DIRS_LOG, "a", 0) as directory_log:
            directory_log.write("%s\n" % os.path.abspath(directory))

    if resume and debug:
        command_args.debug = True

    if train_stdin:
        if test_stdin:
            sys.exit("The standard input can't be used both for training and"
                     " testing. Choose one of them")
        command_args.training_set = StringIO.StringIO(sys.stdin.read())
    elif test_stdin:
        command_args.test_set = StringIO.StringIO(sys.stdin.read())

    api_command_args = {
        'username': command_args.username,
        'api_key': command_args.api_key,
        'dev_mode': command_args.dev_mode,
        'debug': command_args.debug}

    if command_args.store:
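        # With --store, the API client is given a storage directory (the
        # output directory) where it keeps the resources it retrieves.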
        api_command_args.update({'storage': u.check_dir(session_file)})

    api = bigml.api.BigML(**api_command_args)

    if (command_args.evaluate
        and not (command_args.training_set or command_args.source
                 or command_args.dataset)
        and not ((command_args.test_set or command_args.test_split) and
                 (command_args.model or
                  command_args.models or command_args.model_tag or
                  command_args.ensemble or command_args.ensembles or
                  command_args.ensemble_tag))):
        parser.error("Evaluation wrong syntax.\n"
                     "\nTry for instance:\n\nbigmler --train data/iris.csv"
                     " --evaluate\nbigmler --model "
                     "model/5081d067035d076151000011 --dataset "
                     "dataset/5081d067035d076151003423 --evaluate\n"
                     "bigmler --ensemble ensemble/5081d067035d076151003443"
                     " --dataset "
                     "dataset/5081d067035d076151003423 --evaluate")

    if command_args.objective_field:
        objective = command_args.objective_field
        try:
            command_args.objective_field = int(objective)
        except ValueError:
            if not command_args.train_header:
                sys.exit("The %s has been set as objective field but"
                         " the file has not been marked as containing"
                         " headers.\nPlease set the --train-header flag if"
                         " the file has headers or use a column number"
                         " to set the objective field." % objective)

    output_args = {
        "api": api,
        "training_set": command_args.training_set,
        "test_set": command_args.test_set,
        "output": command_args.predictions,
        "objective_field": command_args.objective_field,
        "name": command_args.name,
        "training_set_header": command_args.train_header,
        "test_set_header": command_args.test_header,
        "args": command_args,
        "resume": resume,
    }

    # Reads description if provided.
    if command_args.description:
        description_arg = u.read_description(command_args.description)
        output_args.update(description=description_arg)
    else:
        output_args.update(description="Created using BigMLer")

    # Parses fields if provided.
    if command_args.field_attributes:
        field_attributes_arg = (
            u.read_field_attributes(command_args.field_attributes))
        output_args.update(field_attributes=field_attributes_arg)

    # Parses types if provided.
    if command_args.types:
        types_arg = u.read_types(command_args.types)
        output_args.update(types=types_arg)

    # Parses dataset fields if provided.
    if command_args.dataset_fields:
        dataset_fields_arg = map(str.strip,
                                 command_args.dataset_fields.split(','))
        output_args.update(dataset_fields=dataset_fields_arg)

    # Parses model input fields if provided.
    if command_args.model_fields:
        model_fields_arg = map(str.strip,
                               command_args.model_fields.split(','))
        output_args.update(model_fields=model_fields_arg)

    model_ids = []
    # Parses model/ids if provided.
    if command_args.models:
        model_ids = u.read_resources(command_args.models)
        output_args.update(model_ids=model_ids)

    dataset_id = None
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_id = u.read_dataset(command_args.datasets)
        command_args.dataset = dataset_id

    # Retrieve model/ids if provided.
    if command_args.model_tag:
        model_ids = (model_ids +
                     u.list_ids(api.list_models,
                                "tags__in=%s" % command_args.model_tag))
        output_args.update(model_ids=model_ids)

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    if (command_args.method and
            command_args.method not in COMBINATION_WEIGHTS):
        command_args.method = 0
    else:
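        # Swap keys and values in COMBINER_MAP so the user's method choice
        # can be translated to its counterpart, defaulting to 0 when unknown.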
        combiner_methods = dict([[value, key]
                                for key, value in COMBINER_MAP.items()])
        command_args.method = combiner_methods.get(command_args.method, 0)

    # Adds replacement=True if creating ensemble and nothing is specified
    if (command_args.number_of_models > 1 and
            not command_args.replacement and
            '--no-replacement' not in flags and
            'replacement' not in user_defaults and
            '--no-randomize' not in flags and
            'randomize' not in user_defaults and
            '--sample-rate' not in flags and
            'sample_rate' not in user_defaults):
        command_args.replacement = True

    # Reads votes files in the provided directories.
    if command_args.votes_dirs:
        dirs = map(str.strip, command_args.votes_dirs.split(','))
        votes_path = os.path.dirname(command_args.predictions)
        votes_files = u.read_votes_files(dirs, votes_path)
        output_args.update(votes_files=votes_files)

    # Parses fields map if provided.
    if command_args.fields_map:
        fields_map_arg = u.read_fields_map(command_args.fields_map)
        output_args.update(fields_map=fields_map_arg)

    # The old --prediction-info value 'full data' maps to 'full'
    if command_args.prediction_info == 'full data':
        print "WARNING: 'full data' is a deprecated value. Use 'full' instead."
        command_args.prediction_info = FULL_FORMAT

    # Parses resources ids if provided.
    if command_args.delete:
        if command_args.predictions is None:
            path = NOW
        else:
            path = u.check_dir(command_args.predictions)
        session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
        message = u.dated("Retrieving objects to delete.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        delete_list = []
        if command_args.delete_list:
            delete_list = map(str.strip,
                              command_args.delete_list.split(','))
        if command_args.delete_file:
            if not os.path.exists(command_args.delete_file):
                sys.exit("File %s not found" % command_args.delete_file)
            delete_list.extend([line for line
                                in open(command_args.delete_file, "r")])
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
            delete_list.extend(u.list_ids(api.list_models, query_string))
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve sources/ids if provided
        if command_args.source_tag:
            query_string = "tags__in=%s" % command_args.source_tag
            delete_list.extend(u.list_ids(api.list_sources, query_string))
        # Retrieve datasets/ids if provided
        if command_args.dataset_tag:
            query_string = "tags__in=%s" % command_args.dataset_tag
            delete_list.extend(u.list_ids(api.list_datasets, query_string))
        # Retrieve model/ids if provided
        if command_args.model_tag:
            query_string = "tags__in=%s" % command_args.model_tag
            delete_list.extend(u.list_ids(api.list_models, query_string))
        # Retrieve prediction/ids if provided
        if command_args.prediction_tag:
            query_string = "tags__in=%s" % command_args.prediction_tag
            delete_list.extend(u.list_ids(api.list_predictions, query_string))
        # Retrieve evaluation/ids if provided
        if command_args.evaluation_tag:
            query_string = "tags__in=%s" % command_args.evaluation_tag
            delete_list.extend(u.list_ids(api.list_evaluations, query_string))
        # Retrieve ensembles/ids if provided
        if command_args.ensemble_tag:
            query_string = "tags__in=%s" % command_args.ensemble_tag
            delete_list.extend(u.list_ids(api.list_ensembles, query_string))
        message = u.dated("Deleting objects.\n")
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
        message = "\n".join(delete_list)
        u.log_message(message, log_file=session_file)
        u.delete(api, delete_list)
        if sys.platform == "win32" and sys.stdout.isatty():
            message = (u"\nGenerated files:\n\n" +
                       unicode(u.print_tree(path, " "), "utf-8") + u"\n")
        else:
            message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
        u.log_message(message, log_file=session_file,
                      console=command_args.verbosity)
    elif (command_args.training_set or command_args.test_set
          or command_args.source or command_args.dataset
          or command_args.datasets or command_args.votes_dirs):
        compute_output(**output_args)
    u.log_message("_" * 80 + "\n", log_file=session_file)