def evaluations_process(models_or_ensembles, datasets, name, description,
                        fields, dataset_fields, fields_map, api, args, resume,
                        session_file=None, path=None, log=None, labels=None,
                        all_labels=None, objective_field=None):
    """Evaluates models or ensembles against datasets.

    Creates one evaluation per element of ``models_or_ensembles``. When
    ``resume`` is truthy, evaluations already created by a previous
    (interrupted) run are recovered via the checkpoint stored under
    ``path`` and only the missing ones are created.

    Returns a tuple ``(evaluations, resume)``: the list of evaluation
    resources (recovered plus newly created) and the resume flag as
    updated by the checkpoint test.
    """
    existing_evaluations = 0
    evaluations = []
    # One evaluation is expected per model or ensemble.
    number_of_evaluations = len(models_or_ensembles)
    if resume:
        # c.checkpoint returns the updated resume flag and whatever
        # evaluations were recovered from the previous run.
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            # Only part of the evaluations exist: keep the recovered ones
            # and create just the remainder below.
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        if args.multi_label:
            # Multi-label runs build per-label evaluation arguments from
            # the label lists and the objective field.
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                number_of_evaluations, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(name, description, args,
                                                    fields, dataset_fields,
                                                    fields_map)
        evaluations.extend(
            r.create_evaluations(models_or_ensembles, datasets,
                                 evaluation_args, args, api,
                                 path=path, session_file=session_file,
                                 log=log,
                                 existing_evaluations=existing_evaluations))
    return evaluations, resume
# NOTE(review): this re-defines evaluations_process with an (apparently)
# identical body; Python binds the later definition, silently shadowing the
# earlier one. This looks like an accidental duplicate — confirm and remove
# one of the two definitions.
def evaluations_process(models_or_ensembles, datasets, name, description,
                        fields, dataset_fields, fields_map, api, args, resume,
                        session_file=None, path=None, log=None, labels=None,
                        all_labels=None, objective_field=None):
    """Evaluates models or ensembles against datasets.

    Creates one evaluation per element of ``models_or_ensembles``. When
    ``resume`` is truthy, evaluations already created by a previous
    (interrupted) run are recovered via the checkpoint stored under
    ``path`` and only the missing ones are created.

    Returns a tuple ``(evaluations, resume)``: the list of evaluation
    resources (recovered plus newly created) and the resume flag as
    updated by the checkpoint test.
    """
    existing_evaluations = 0
    evaluations = []
    # One evaluation is expected per model or ensemble.
    number_of_evaluations = len(models_or_ensembles)
    if resume:
        # c.checkpoint returns the updated resume flag and whatever
        # evaluations were recovered from the previous run.
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            # Only part of the evaluations exist: keep the recovered ones
            # and create just the remainder below.
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n" %
                              (existing_evaluations,
                               number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        if args.multi_label:
            # Multi-label runs build per-label evaluation arguments from
            # the label lists and the objective field.
            evaluation_args = r.set_label_evaluation_args(
                name, description, args, labels, all_labels,
                number_of_evaluations, fields, dataset_fields, fields_map,
                objective_field)
        else:
            evaluation_args = r.set_evaluation_args(name, description, args,
                                                    fields, dataset_fields,
                                                    fields_map)
        evaluations.extend(r.create_evaluations(
            models_or_ensembles, datasets, evaluation_args, args, api,
            path=path, session_file=session_file, log=log,
            existing_evaluations=existing_evaluations))
    return evaluations, resume