Example #1
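The excerpts below use the module aliases c, r, u and the os module without showing any imports. A plausible import block, assuming the usual BigMLer-style layout (the exact module paths are an assumption, not part of the excerpts), would be:

import os

import bigmler.utils as u        # assumed home of u.dated and u.log_message
import bigmler.resources as r    # assumed home of r.set_evaluation_args and r.create_evaluations
import bigmler.checkpoint as c   # assumed home of c.checkpoint and c.are_evaluations_created
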
def evaluations_process(time_series_set, datasets,
                        fields, dataset_fields, api, args, resume,
                        session_file=None, path=None, log=None,
                        objective_field=None):
    """Evaluates time-series against datasets

    """

    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(time_series_set)
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(args, fields,
                                                dataset_fields)
        evaluations.extend(r.create_evaluations(
            time_series_set, datasets, evaluation_args,
            args, api, path=path, session_file=session_file,
            log=log, existing_evaluations=existing_evaluations))

    return evaluations, resume
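
A hypothetical calling sketch for the time-series variant above. Every concrete value (resource ids, paths, the args namespace) is a placeholder added for illustration, not something taken from the excerpt; real runs pass the Fields objects and the Namespace built by the tool's own option parser.

from types import SimpleNamespace

from bigml.api import BigML

api = BigML()  # credentials read from BIGML_USERNAME / BIGML_API_KEY
args = SimpleNamespace(debug=False, verbosity=1)  # stand-in for the parsed CLI options
fields = dataset_fields = None  # placeholders; real code passes bigml.fields.Fields objects

evaluations, resume = evaluations_process(
    ["timeseries/5f2b0000000000000000000a"],  # time-series to evaluate (placeholder id)
    ["dataset/5f2b0000000000000000000b"],     # matching test datasets (placeholder id)
    fields, dataset_fields, api, args, resume=False,
    session_file="bigmler_sessions", path="./output", log="resources.log")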
Example #2
def evaluations_process(models_or_ensembles,
                        datasets,
                        name,
                        description,
                        fields,
                        dataset_fields,
                        fields_map,
                        api,
                        args,
                        resume,
                        session_file=None,
                        path=None,
                        log=None,
                        labels=None,
                        all_labels=None,
                        objective_field=None):
    """Evaluates models or ensembles against datasets

    """
    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(models_or_ensembles)
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created,
                                           path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations, number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
    if not resume:
        if args.multi_label:
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                number_of_evaluations, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(name, description, args,
                                                    fields, dataset_fields,
                                                    fields_map)

        evaluations.extend(
            r.create_evaluations(models_or_ensembles,
                                 datasets,
                                 evaluation_args,
                                 args,
                                 api,
                                 path=path,
                                 session_file=session_file,
                                 log=log,
                                 existing_evaluations=existing_evaluations))

    return evaluations, resume
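
The only branching in this variant is on args.multi_label. A hedged calling sketch, with placeholders for every concrete value and a bare SimpleNamespace standing in for the real parsed options:

from types import SimpleNamespace

from bigml.api import BigML

api = BigML()
args = SimpleNamespace(debug=False, verbosity=1, multi_label=False)  # stand-in CLI options
fields = dataset_fields = None  # placeholders; real code passes bigml.fields.Fields objects

evaluations, resume = evaluations_process(
    ["model/5e1a0000000000000000000a"],    # models or ensembles (placeholder id)
    ["dataset/5e1a0000000000000000000b"],  # test datasets (placeholder id)
    "churn model", "evaluation run",       # name and description
    fields, dataset_fields, None,          # fields, dataset_fields, fields_map
    api, args, resume=False,
    session_file="bigmler_sessions", path="./output", log="resources.log")

With multi_label=True the function would instead expect labels and all_labels and build per-label evaluation arguments through r.set_label_evaluation_args.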
Example #3
def cross_validate(models,
                   dataset,
                   number_of_evaluations,
                   name,
                   description,
                   fields,
                   fields_map,
                   api,
                   args,
                   resume,
                   session_file=None,
                   path=None,
                   log=None):
    """Cross-validates using a MONTE-CARLO variant

    """
    existing_evaluations = 0
    evaluations = []
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created,
                                           path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations, number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)

        evaluations.extend(
            r.create_evaluations(models, dataset, evaluation_args, args, api,
                                 path, session_file, log,
                                 existing_evaluations))
        evaluations_files = []
        for evaluation in evaluations:
            evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                          session_file)
            model_id = evaluation['object']['model']
            file_name = "%s%s%s__evaluation" % (path, os.sep,
                                                model_id.replace("/", "_"))
            evaluations_files.append(file_name + ".json")
            r.save_evaluation(evaluation, file_name, api)
        cross_validation = average_evaluations(evaluations_files)
        file_name = "%s%scross_validation" % (path, os.sep)
        r.save_evaluation(cross_validation, file_name, api)
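
cross_validate returns nothing: as the body above shows, it writes one <model_id>__evaluation.json file per model under path and then an averaged cross_validation file. A hedged driving sketch, with every concrete value a placeholder rather than something taken from the excerpt:

from types import SimpleNamespace

from bigml.api import BigML

api = BigML()
args = SimpleNamespace(debug=False, verbosity=1)  # stand-in for the parsed CLI options
fields = None  # placeholder; real code passes a bigml.fields.Fields object

cross_validate(
    ["model/5d0a0000000000000000000a",
     "model/5d0a0000000000000000000b"],    # candidate models (placeholder ids)
    "dataset/5d0a0000000000000000000c",    # evaluation dataset (placeholder id)
    2,                                     # number_of_evaluations
    "cv run", "Monte-Carlo cross-validation",
    fields, None,                          # fields, fields_map
    api, args, resume=False,
    path="./cv_output", session_file="bigmler_sessions", log="resources.log")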
Example #4
def evaluations_process(time_series_set,
                        datasets,
                        fields,
                        dataset_fields,
                        api,
                        args,
                        resume,
                        session_file=None,
                        path=None,
                        log=None,
                        objective_field=None):
    """Evaluates time-series against datasets

    """

    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(time_series_set)
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created,
                                           path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations, number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message,
                          log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(args, fields, dataset_fields)
        evaluations.extend(
            r.create_evaluations(time_series_set,
                                 datasets,
                                 evaluation_args,
                                 args,
                                 api,
                                 path=path,
                                 session_file=session_file,
                                 log=log,
                                 existing_evaluations=existing_evaluations))

    return evaluations, resume
Example #5
def cross_validate(models, dataset, number_of_evaluations, name, description,
                   fields, fields_map, api, args, resume,
                   session_file=None, path=None, log=None):
    """Cross-validates using a MONTE-CARLO variant

    """
    existing_evaluations = 0
    evaluations = []
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)

        evaluations.extend(r.create_evaluations(models, dataset,
                                                evaluation_args,
                                                args, api, path,
                                                session_file, log,
                                                existing_evaluations))
        evaluations_files = []
        for evaluation in evaluations:
            evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                          session_file)
            model_id = evaluation['object']['model']
            file_name = "%s%s%s__evaluation" % (path, os.sep,
                                                model_id.replace("/", "_"))
            evaluations_files.append(file_name + ".json")
            r.save_evaluation(evaluation, file_name, api)
        cross_validation = average_evaluations(evaluations_files)
        file_name = "%s%scross_validation" % (path, os.sep)
        r.save_evaluation(cross_validation, file_name, api)
Example #6
def evaluations_process(models_or_ensembles, datasets, name, description,
                        fields, dataset_fields, fields_map, api, args, resume,
                        session_file=None, path=None, log=None, labels=None,
                        all_labels=None, objective_field=None):
    """Evaluates models or ensembles against datasets

    """
    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(models_or_ensembles)
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        if args.multi_label:
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                number_of_evaluations, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(name, description, args,
                                                    fields, dataset_fields,
                                                    fields_map)

        evaluations.extend(r.create_evaluations(
            models_or_ensembles, datasets, evaluation_args,
            args, api, path=path, session_file=session_file,
            log=log, existing_evaluations=existing_evaluations))

    return evaluations, resume