Example #1
0
def evaluate(model, dataset, name, description, fields, fields_map, output,
             api, args, resume,
             session_file=None, path=None, log=None):
    """Evaluates a model or an ensemble with the given dataset

    When resuming, tries to recover a previously created evaluation from
    the checkpoint; otherwise a new evaluation is created. The finished
    evaluation is then retrieved and saved to `output`.
    """
    if resume:
        resume, evaluation = c.checkpoint(
            c.is_evaluation_created, path, debug=args.debug,
            message=u.dated(u"Evaluation not found. Resuming.\n"),
            log_file=session_file, console=args.verbosity)

    if not resume:
        # An ensemble id set in args takes precedence over the model.
        model_or_ensemble = args.ensemble or model
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)
        evaluation = r.create_evaluation(
            model_or_ensemble, dataset, evaluation_args,
            args, api, path, session_file, log)

    # Wait until the evaluation finishes, then store the results.
    evaluation = r.get_evaluation(
        evaluation, api, args.verbosity, session_file)
    r.save_evaluation(evaluation, output, api)
    return resume
Example #2
0
def evaluations_process(time_series_set, datasets,
                        fields, dataset_fields, api, args, resume,
                        session_file=None, path=None, log=None,
                        objective_field=None):
    """Evaluates time-series against datasets

    Recovers any evaluations already created when resuming and only
    creates the remaining ones.
    """

    existing = 0
    evaluations = []
    pending = len(time_series_set)
    if resume:
        resume, evaluations = c.checkpoint(
            c.are_evaluations_created, path, pending, debug=args.debug)
        if not resume:
            # Partial resume: some evaluations already exist on disk.
            existing = len(evaluations)
            u.log_message(
                u.dated("Found %s evaluations from %s. Resuming.\n" %
                        (existing, pending)),
                log_file=session_file, console=args.verbosity)
            pending -= existing
    if not resume:
        evaluation_args = r.set_evaluation_args(args, fields, dataset_fields)
        evaluations.extend(r.create_evaluations(
            time_series_set, datasets, evaluation_args,
            args, api, path=path, session_file=session_file,
            log=log, existing_evaluations=existing))

    return evaluations, resume
Example #3
0
def evaluate(model, dataset, name, description, fields, fields_map, output,
             api, args, resume,
             session_file=None, path=None, log=None):
    """Evaluates a model or an ensemble with the given dataset

    Either recovers a checkpointed evaluation (resume mode) or creates a
    fresh one, waits for completion and writes the result to `output`.
    """
    if resume:
        resume, evaluation = c.checkpoint(
            c.is_evaluation_created,
            path,
            debug=args.debug,
            message=u.dated(u"Evaluation not found. Resuming.\n"),
            log_file=session_file,
            console=args.verbosity)

    if not resume:
        # Evaluate the ensemble when one is given; fall back to the model.
        if args.ensemble:
            target = args.ensemble
        else:
            target = model
        evaluation = r.create_evaluation(
            target, dataset,
            r.set_evaluation_args(name, description, args, fields,
                                  fields_map),
            args, api, path, session_file, log)

    evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                  session_file)
    r.save_evaluation(evaluation, output, api)
    return resume
Example #4
0
def evaluations_process(models_or_ensembles,
                        datasets,
                        name,
                        description,
                        fields,
                        dataset_fields,
                        fields_map,
                        api,
                        args,
                        resume,
                        session_file=None,
                        path=None,
                        log=None,
                        labels=None,
                        all_labels=None,
                        objective_field=None):
    """Evaluates models or ensembles against datasets

    On resume, already-created evaluations are recovered from the
    checkpoint and only the missing ones are created.
    """
    existing = 0
    evaluations = []
    pending = len(models_or_ensembles)
    if resume:
        resume, evaluations = c.checkpoint(
            c.are_evaluations_created, path, pending, debug=args.debug)
        if not resume:
            existing = len(evaluations)
            u.log_message(
                u.dated("Found %s evaluations from %s. Resuming.\n" %
                        (existing, pending)),
                log_file=session_file, console=args.verbosity)
            pending -= existing
    if not resume:
        if args.multi_label:
            # Multi-label runs need per-label evaluation settings.
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                pending, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(
                name, description, args, fields, dataset_fields, fields_map)

        evaluations.extend(r.create_evaluations(
            models_or_ensembles, datasets, evaluation_args, args, api,
            path=path, session_file=session_file, log=log,
            existing_evaluations=existing))

    return evaluations, resume
Example #5
0
def cross_validate(models,
                   dataset,
                   number_of_evaluations,
                   name,
                   description,
                   fields,
                   fields_map,
                   api,
                   args,
                   resume,
                   session_file=None,
                   path=None,
                   log=None):
    """Cross-validates using a MONTE-CARLO variant

    Evaluates each model in `models` against `dataset`, saves every
    finished evaluation to a per-model file under `path`, then averages
    all of them into a single `cross_validation` file. Returns None.
    """
    existing_evaluations = 0
    evaluations = []
    if resume:
        # Recover evaluations already created in a previous run.
        resume, evaluations = c.checkpoint(c.are_evaluations_created,
                                           path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            # Partial resume: only the missing evaluations will be created.
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations, number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)

        evaluations.extend(
            r.create_evaluations(models, dataset, evaluation_args, args, api,
                                 path, session_file, log,
                                 existing_evaluations))
        evaluations_files = []
        for evaluation in evaluations:
            # Wait for each evaluation to finish before saving it.
            evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                          session_file)
            model_id = evaluation['object']['model']
            # Derive the file name from the model id ("model/123" -> "model_123").
            file_name = "%s%s%s__evaluation" % (path, os.sep,
                                                model_id.replace("/", "_"))
            # NOTE(review): presumably r.save_evaluation appends ".json",
            # since that suffix is added when recording the file name.
            evaluations_files.append(file_name + ".json")
            r.save_evaluation(evaluation, file_name, api)
        # Average the per-model evaluations into the final result file.
        cross_validation = average_evaluations(evaluations_files)
        file_name = "%s%scross_validation" % (path, os.sep)
        r.save_evaluation(cross_validation, file_name, api)
Example #6
0
def evaluations_process(time_series_set,
                        datasets,
                        fields,
                        dataset_fields,
                        api,
                        args,
                        resume,
                        session_file=None,
                        path=None,
                        log=None,
                        objective_field=None):
    """Evaluates time-series against datasets

    Previously created evaluations are recovered on resume; only the
    missing ones are created.
    """

    evaluations = []
    done = 0
    remaining = len(time_series_set)
    if resume:
        resume, evaluations = c.checkpoint(
            c.are_evaluations_created, path, remaining, debug=args.debug)
        if not resume:
            # Some evaluations were found; report and continue from there.
            done = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (done, remaining))
            remaining -= done
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluations.extend(r.create_evaluations(
            time_series_set,
            datasets,
            r.set_evaluation_args(args, fields, dataset_fields),
            args,
            api,
            path=path,
            session_file=session_file,
            log=log,
            existing_evaluations=done))

    return evaluations, resume
Example #7
0
def cross_validate(models, dataset, number_of_evaluations, name, description,
                   fields, fields_map, api, args, resume,
                   session_file=None, path=None, log=None):
    """Cross-validates using a MONTE-CARLO variant

    Evaluates every model in `models` against `dataset`, saves each
    finished evaluation to a per-model file under `path`, then averages
    them into a single `cross_validation` file. Returns None.
    """
    existing_evaluations = 0
    evaluations = []
    if resume:
        # Recover any evaluations created by a previous run.
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            # Partial resume: only the missing evaluations will be created.
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)

        evaluations.extend(r.create_evaluations(models, dataset,
                                                evaluation_args,
                                                args, api, path,
                                                session_file, log,
                                                existing_evaluations))
        evaluations_files = []
        for evaluation in evaluations:
            # Wait for each evaluation to finish before saving it.
            evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                          session_file)
            model_id = evaluation['object']['model']
            # Derive the file name from the model id ("model/123" -> "model_123").
            file_name = "%s%s%s__evaluation" % (path, os.sep,
                                                model_id.replace("/", "_"))
            # NOTE(review): presumably r.save_evaluation appends ".json",
            # since that suffix is added when recording the file name.
            evaluations_files.append(file_name + ".json")
            r.save_evaluation(evaluation, file_name, api)
        # Average the per-model evaluations into the final result file.
        cross_validation = average_evaluations(evaluations_files)
        file_name = "%s%scross_validation" % (path, os.sep)
        r.save_evaluation(cross_validation, file_name, api)
Example #8
0
def evaluations_process(models_or_ensembles, datasets, name, description,
                        fields, dataset_fields, fields_map, api, args, resume,
                        session_file=None, path=None, log=None, labels=None,
                        all_labels=None, objective_field=None):
    """Evaluates models or ensembles against datasets

    When resuming, checkpointed evaluations are recovered and only the
    missing ones are created.
    """
    evaluations = []
    found = 0
    to_create = len(models_or_ensembles)
    if resume:
        resume, evaluations = c.checkpoint(
            c.are_evaluations_created, path, to_create, debug=args.debug)
        if not resume:
            # Partial resume: report progress and create only the rest.
            found = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (found, to_create))
            to_create -= found
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        if args.multi_label:
            # Multi-label runs need per-label evaluation settings.
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                to_create, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(
                name, description, args, fields, dataset_fields, fields_map)

        evaluations.extend(r.create_evaluations(
            models_or_ensembles, datasets, evaluation_args, args, api,
            path=path, session_file=session_file, log=log,
            existing_evaluations=found))

    return evaluations, resume
Example #9
0
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        if resume:
            resume, evaluation = u.checkpoint(u.is_evaluation_created, path,
                                              debug=args.debug)
            if not resume:
                message = u.dated("Evaluation not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        if not resume:
            evaluation_args = r.set_evaluation_args(name, description, args,
                                                    fields, fields_map)
            evaluation = r.create_evaluation(model, dataset, evaluation_args,
                                             args, api, path, session_file,
                                             log)

        evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                      session_file)
        r.save_evaluation(evaluation, output, api)

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if not data_locale[0] is None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +