Example #1
def evaluate(models_or_ensembles, datasets, output, api, args, resume,
             session_file=None, path=None, log=None, name=None,
             description=None, fields=None, dataset_fields=None,
             fields_map=None, labels=None, all_labels=None,
             objective_field=None):
    """Evaluates a list of models or ensembles with the given dataset

    """
    evaluation_files = []
    evaluations, resume = evaluations_process(
        models_or_ensembles, datasets, name, description, fields,
        dataset_fields, fields_map, api, args, resume,
        session_file=session_file, path=path, log=log,
        labels=labels, all_labels=all_labels, objective_field=objective_field)
    if args.multi_label:
        # One output file per label: build slugified suffixes from the
        # objective field names
        file_labels = [slugify(name) for name in
                       u.objective_field_names(models_or_ensembles, api)]
    for index in range(0, len(evaluations)):
        evaluation = evaluations[index]
        evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                      session_file)
        file_name = output
        if args.multi_label:
            suffix = file_labels[index]
            file_name += "_%s" % suffix
            evaluation_files.append("%s.json" % file_name)
        r.save_evaluation(evaluation, file_name, api)
    if args.multi_label:
        mean_evaluation = average_evaluations(evaluation_files)
        r.save_evaluation(mean_evaluation, output, api)
    return resume
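
In the multi-label branch above, each per-label output file gets a suffix built by slugifying the objective field names returned by u.objective_field_names. The short standalone sketch below only illustrates that naming convention; simple_slugify and the field names are made up for the example and stand in for the real slugify helper.

import re

def simple_slugify(name):
    # Simplified stand-in for the slugify helper used above: lowercase the
    # name and collapse any non-alphanumeric run into a single underscore.
    return re.sub(r'[^a-z0-9]+', '_', name.lower()).strip('_')

# Hypothetical objective field names for a multi-label task
objective_names = ["Has Churned?", "Is Premium User"]
file_labels = [simple_slugify(name) for name in objective_names]

output = "evaluations/evaluation"
for label in file_labels:
    # Mirrors the per-label naming in evaluate(): one JSON file per label
    print("%s_%s.json" % (output, label))
# evaluations/evaluation_has_churned.json
# evaluations/evaluation_is_premium_user.json
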
Example #2
def evaluate(models_or_ensembles,
             datasets,
             api,
             args,
             resume,
             session_file=None,
             path=None,
             log=None,
             fields=None,
             dataset_fields=None,
             labels=None,
             all_labels=None,
             objective_field=None):
    """Evaluates a list of models or ensembles with the given dataset

    """
    output = args.predictions
    evaluation_files = []
    evaluations, resume = evaluations_process(models_or_ensembles,
                                              datasets,
                                              fields,
                                              dataset_fields,
                                              api,
                                              args,
                                              resume,
                                              session_file=session_file,
                                              path=path,
                                              log=log,
                                              labels=labels,
                                              all_labels=all_labels,
                                              objective_field=objective_field)
    if args.multi_label:
        # One output file per label: build slugified suffixes from the
        # objective field names
        file_labels = [slugify(name) for name in
                       u.objective_field_names(models_or_ensembles, api)]
    for index in range(0, len(evaluations)):
        evaluation = evaluations[index]
        evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                      session_file)
        # Propagate a change in the --shared flag to the evaluation resource
        if r.shared_changed(args.shared, evaluation):
            evaluation_args = {"shared": args.shared}
            evaluation = r.update_evaluation(evaluation,
                                             evaluation_args,
                                             args,
                                             api=api,
                                             path=path,
                                             session_file=session_file)
        file_name = output
        if args.multi_label:
            suffix = file_labels[index]
            file_name += "_%s" % suffix
            evaluation_files.append("%s.json" % file_name)
        if args.test_datasets or args.dataset_off:
            suffix = evaluation['resource'].replace('evaluation/', '_')
            file_name += "_%s" % suffix
            evaluation_files.append("%s.json" % file_name)
        r.save_evaluation(evaluation, file_name, api)
    if args.multi_label or args.test_datasets or args.dataset_off:
        mean_evaluation = average_evaluations(evaluation_files)
        r.save_evaluation(mean_evaluation, output, api)
    return resume
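
With --test-datasets or --dataset-off, each evaluation is instead saved under a file name suffixed with the id part of its evaluation resource. A minimal standalone sketch of that suffix derivation, using a made-up resource id:

# Hypothetical evaluation resource id, as stored in the evaluation dict
evaluation = {"resource": "evaluation/53e6a9e8f4b9a1e1b2000000"}

output = "evaluations/evaluation"
suffix = evaluation['resource'].replace('evaluation/', '_')
file_name = output + "_%s" % suffix
# The suffix already starts with "_", so the result has a double underscore
print("%s.json" % file_name)
# evaluations/evaluation__53e6a9e8f4b9a1e1b2000000.json
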
Example #3
def evaluate(models_or_ensembles, datasets, api, args, resume,
             session_file=None, path=None, log=None,
             fields=None, dataset_fields=None,
             labels=None, all_labels=None,
             objective_field=None):
    """Evaluates a list of models or ensembles with the given dataset

    """
    output = args.predictions
    evaluation_files = []
    evaluations, resume = evaluations_process(
        models_or_ensembles, datasets, fields,
        dataset_fields, api, args, resume,
        session_file=session_file, path=path, log=log,
        labels=labels, all_labels=all_labels, objective_field=objective_field)
    if hasattr(args, 'multi_label') and args.multi_label:
        file_labels = [slugify(name) for name in
                       u.objective_field_names(models_or_ensembles, api)]
    for index in range(0, len(evaluations)):
        evaluation = evaluations[index]
        evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                      session_file)
        if r.shared_changed(args.shared, evaluation):
            evaluation_args = {"shared": args.shared}
            evaluation = r.update_evaluation(evaluation, evaluation_args,
                                             args, api=api, path=path,
                                             session_file=session_file)
        file_name = output
        if hasattr(args, 'multi_label') and args.multi_label:
            suffix = file_labels[index]
            file_name += "_%s" % suffix
            evaluation_files.append("%s.json" % file_name)
        if args.test_datasets or args.dataset_off:
            suffix = evaluation['resource'].replace('evaluation/', '_')
            file_name += "_%s" % suffix
            evaluation_files.append("%s.json" % file_name)
        r.save_evaluation(evaluation, file_name, api)
    if (hasattr(args, 'multi_label') and args.multi_label) or \
            args.test_datasets or args.dataset_off:
        mean_evaluation = average_evaluations(evaluation_files)
        r.save_evaluation(mean_evaluation, output, api)
    return resume
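
All three versions finish by averaging the per-file evaluations into a single result saved under the base output path. The bigmler helper average_evaluations itself is not shown here; the sketch below is only a simplified illustration of the idea, averaging one assumed metric over in-memory evaluation dicts instead of JSON files.

def average_metric(evaluations, metric="accuracy"):
    # Simplified illustration, not bigmler's average_evaluations: average a
    # single assumed metric over a list of evaluation-like dicts.
    values = [ev["result"]["model"][metric] for ev in evaluations]
    return sum(values) / float(len(values))

# Hypothetical per-dataset evaluation results
evaluations = [
    {"result": {"model": {"accuracy": 0.91}}},
    {"result": {"model": {"accuracy": 0.87}}},
    {"result": {"model": {"accuracy": 0.89}}},
]
print(average_metric(evaluations))  # roughly 0.89
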