def alternative_dataset_processing(dataset_or_source, suffix, dataset_args,
                                   api, args, resume, session_file=None,
                                   path=None, log=None):
    """Creates a dataset. Used in splits to generate train and test datasets

    """
    alternative_dataset = None
    # if resuming, try to extract dataset from log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, alternative_dataset = c.checkpoint(
            c.is_dataset_created, path, "_%s" % suffix, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if alternative_dataset is None:
        alternative_dataset = r.create_dataset(
            dataset_or_source, dataset_args, args, api, path, session_file,
            log, suffix)
        if alternative_dataset:
            alternative_dataset = r.get_dataset(
                alternative_dataset, api, args.verbosity, session_file)
    return alternative_dataset, resume
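# Hedged usage sketch (not part of the original module): one way a split
# workflow could delegate the held-out part of a split to
# alternative_dataset_processing. The helper r.set_dataset_split_args is the
# same one used in split_processing below; the argument values and the call
# site itself are illustrative assumptions only.
def example_test_split_call(dataset, name, description, api, args, resume,
                            session_file=None, path=None, log=None):
    """Illustrative only: builds test-split args and delegates to
    alternative_dataset_processing with the "test" suffix.

    """
    dataset_args = r.set_dataset_split_args(
        "%s - test (%s %%)" % (name, int(args.test_split * 100)),
        description, args, 1 - args.test_split, out_of_bag=True)
    return alternative_dataset_processing(
        dataset, "test", dataset_args, api, args, resume,
        session_file=session_file, path=path, log=log)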
def split_processing(dataset, name, description, api, args, resume,
                     session_file=None, path=None, log=None):
    """Splits a dataset into train and test datasets

    """
    train_dataset = None
    test_dataset = None
    sample_rate = 1 - args.test_split
    # if resuming, try to extract train dataset from log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, train_dataset = c.checkpoint(
            c.is_dataset_created, path, "_train", debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if train_dataset is None:
        dataset_split_args = r.set_dataset_split_args(
            "%s - train (%s %%)" % (name, int(sample_rate * 100)),
            description, args, sample_rate, out_of_bag=False)
        train_dataset = r.create_dataset(
            dataset, dataset_split_args, args, api, path, session_file, log,
            "train")
        if train_dataset:
            train_dataset = r.get_dataset(
                train_dataset, api, args.verbosity, session_file)

    # if resuming, try to extract test dataset from log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, test_dataset = c.checkpoint(
            c.is_dataset_created, path, "_test", debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if test_dataset is None:
        dataset_split_args = r.set_dataset_split_args(
            "%s - test (%s %%)" % (name, int(args.test_split * 100)),
            description, args, sample_rate, out_of_bag=True)
        test_dataset = r.create_dataset(
            dataset, dataset_split_args, args, api, path, session_file, log,
            "test")
        if test_dataset:
            test_dataset = r.get_dataset(
                test_dataset, api, args.verbosity, session_file)
    return train_dataset, test_dataset, resume
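# Hedged illustration (not from the original module): split_processing relies
# on the same sample_rate (and seed) being used with out_of_bag=False for the
# train part and out_of_bag=True for the test part, so the two datasets cover
# complementary rows. The toy function below mimics that contract locally
# with a hashed row index; the real sampling is done server-side by BigML and
# may differ in detail.
def toy_deterministic_split(rows, sample_rate, seed="split",
                            out_of_bag=False):
    """Illustrative only: keeps a row when its seeded hash falls below
    sample_rate (in-bag) or at/above it (out-of-bag).

    """
    import hashlib

    selected = []
    for index, row in enumerate(rows):
        digest = hashlib.md5(("%s-%s" % (seed, index)).encode("utf-8"))
        fraction = int(digest.hexdigest(), 16) / float(16 ** 32)
        in_bag = fraction < sample_rate
        if in_bag != out_of_bag:
            selected.append(row)
    return selected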
def dataset_processing(source, training_set, test_set, model_ids, name,
                       description, fields, dataset_fields, api, args,
                       resume, csv_properties=None, session_file=None,
                       path=None, log=None):
    """Creating or retrieving dataset from input arguments

    """
    dataset = None
    if (training_set or args.source or (args.evaluate and test_set)):
        # if resuming, try to extract args.dataset from log files
        if resume:
            message = u.dated("Dataset not found. Resuming.\n")
            resume, args.dataset = c.checkpoint(
                c.is_dataset_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = r.set_dataset_args(name, description, args, fields,
                                          dataset_fields)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if not csv_properties and 'locale' in dataset['object']:
            csv_properties = {'data_locale': dataset['object']['locale']}
        fields = Fields(dataset['object']['fields'], **csv_properties)
        if args.public_dataset:
            r.publish_dataset(dataset, api, args, session_file)
    return dataset, resume, csv_properties, fields
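# Hedged sketch (assumption, not bigmler's actual c.checkpoint): the resume
# branches above expect c.checkpoint to run a checker such as
# c.is_dataset_created against the command's output directory, log `message`
# only when nothing is found, and return a (resume, resource) pair. A minimal
# helper consistent with that calling convention could look like this; the
# checker signature and the handling of the `debug` keyword are guesses and
# the real implementation may differ.
def checkpoint_sketch(checker_function, path, *args, **kwargs):
    """Illustrative only: returns (resume, resource) from a checker run over
    the files stored under `path`.

    """
    message = kwargs.get("message")
    log_file = kwargs.get("log_file")
    console = kwargs.get("console", 0)
    found, resource = checker_function(path, *args)
    if not found and message is not None:
        u.log_message(message, log_file=log_file, console=console)
    return found, resource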
def dataset_processing(source, api, args, resume, fields=None,
                       csv_properties=None, multi_label_data=None,
                       session_file=None, path=None, log=None):
    """Creating or retrieving dataset from input arguments

    """
    datasets = []
    dataset = None
    if (args.training_set or args.source or (
            hasattr(args, "evaluate") and args.evaluate and args.test_set)):
        # if resuming, try to extract args.dataset from log files
        if resume:
            message = u.dated("Dataset not found. Resuming.\n")
            resume, args.dataset = c.checkpoint(
                c.is_dataset_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.has_datasets_ and not args.has_models_
            and not args.no_dataset) or
            (hasattr(args, "evaluate") and args.evaluate and args.test_set
             and not args.dataset)):
        dataset_args = r.set_dataset_args(args, fields,
                                          multi_label_data=multi_label_data)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a set of datasets is provided, let's check their ids.
    elif args.dataset_ids:
        for i in range(0, len(args.dataset_ids)):
            dataset_id = args.dataset_ids[i]
            if isinstance(dataset_id, dict) and "id" in dataset_id:
                dataset_id = dataset_id["id"]
            datasets.append(bigml.api.get_dataset_id(dataset_id))
        dataset = datasets[0]

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if ('object' in dataset and 'objective_field' in dataset['object']
                and 'column_number' in dataset['object']['objective_field']):
            dataset_objective = dataset[
                'object']['objective_field']['column_number']
            csv_properties.update(objective_field=dataset_objective,
                                  objective_field_present=True)

        fields = get_fields_structure(dataset, csv_properties)

        if args.public_dataset:
            r.publish_dataset(dataset, args, api, session_file)

        if hasattr(args, 'objective_field'):
            new_objective = get_new_objective(fields, args.objective_field)
        else:
            new_objective = None

        updated = False
        # We'll update the dataset if:
        #  - the flag --dataset_attributes is used
        #  - the --multi-label flag is used and there's an --objective-field
        #  - the --max-categories flag is used and there's an
        #    --objective-field
        #  - the --import-fields flag is used
        if check_dataset_update(args, dataset):
            dataset_args = r.set_dataset_args(args, fields)
            if args.shared_flag and r.shared_changed(args.shared, dataset):
                dataset_args.update(shared=args.shared)
            dataset = r.update_dataset(dataset, dataset_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
            dataset = r.get_dataset(dataset, api, args.verbosity,
                                    session_file)
            updated = True
        if new_objective is not None:
            csv_properties.update(objective_field=args.objective_field,
                                  objective_field_present=True)
            updated = True
        if updated:
            fields = Fields(dataset['object']['fields'], **csv_properties)
        if not datasets:
            datasets = [dataset]
        else:
            datasets[0] = dataset
    return datasets, resume, csv_properties, fields
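# Hedged example (made-up ids): the args.dataset_ids handling above accepts
# either plain dataset ids or dicts carrying an "id" key, so both entries
# below are normalized to bare ids before bigml.api.get_dataset_id is
# applied. The helper mirrors the id extraction in the loop above and is
# illustrative only.
EXAMPLE_DATASET_IDS = [
    "dataset/53b1f71437203f5ac30004ed",
    {"id": "dataset/53b1f71437203f5ac30004ee", "name": "my dataset"},
]


def example_normalize_dataset_ids(dataset_ids):
    """Illustrative only: extracts bare dataset ids as the loop above does.

    """
    normalized = []
    for dataset_id in dataset_ids:
        if isinstance(dataset_id, dict) and "id" in dataset_id:
            dataset_id = dataset_id["id"]
        normalized.append(dataset_id)
    return normalized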
def dataset_processing(source, api, args, resume, fields=None,
                       csv_properties=None, multi_label_data=None,
                       session_file=None, path=None, log=None):
    """Creating or retrieving dataset from input arguments

    """
    datasets = []
    dataset = None
    if (args.training_set or args.source or (
            hasattr(args, "evaluate") and args.evaluate and args.test_set)):
        # if resuming, try to extract args.dataset from log files
        if resume:
            message = u.dated("Dataset not found. Resuming.\n")
            resume, args.dataset = c.checkpoint(
                c.is_dataset_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.has_datasets_ and not args.has_models_
            and not args.no_dataset) or
            (hasattr(args, "evaluate") and args.evaluate and args.test_set
             and not args.dataset)):
        dataset_args = r.set_dataset_args(args, fields,
                                          multi_label_data=multi_label_data)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If a set of datasets is provided, let's check their ids.
    elif args.dataset_ids:
        for i in range(0, len(args.dataset_ids)):
            datasets.append(bigml.api.get_dataset_id(args.dataset_ids[i]))
        dataset = datasets[0]

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if ('object' in dataset and 'objective_field' in dataset['object']
                and 'column_number' in dataset['object']['objective_field']):
            dataset_objective = dataset[
                'object']['objective_field']['column_number']
            csv_properties.update(objective_field=dataset_objective,
                                  objective_field_present=True)

        fields = get_fields_structure(dataset, csv_properties)

        if args.public_dataset:
            r.publish_dataset(dataset, args, api, session_file)

        if hasattr(args, 'objective_field'):
            new_objective = get_new_objective(fields, args.objective_field)
        else:
            new_objective = None

        updated = False
        # We'll update the dataset if:
        #  - the flag --dataset_attributes is used
        #  - the --multi-label flag is used and there's an --objective-field
        #  - the --max-categories flag is used and there's an
        #    --objective-field
        if check_dataset_update(args, dataset):
            dataset_args = r.set_dataset_args(args, fields)
            if args.shared_flag and r.shared_changed(args.shared, dataset):
                dataset_args.update(shared=args.shared)
            dataset = r.update_dataset(dataset, dataset_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
            dataset = r.get_dataset(dataset, api, args.verbosity,
                                    session_file)
            updated = True
        if new_objective is not None:
            csv_properties.update(objective_field=args.objective_field,
                                  objective_field_present=True)
            updated = True
        if updated:
            fields = Fields(dataset['object']['fields'], **csv_properties)
        if not datasets:
            datasets = [dataset]
        else:
            datasets[0] = dataset
    return datasets, resume, csv_properties, fields
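# Hedged sketch (assumption, not the real bigmler helper): the comment block
# above lists the conditions under which the dataset gets updated, so a
# check_dataset_update consistent with that list might look as follows. The
# attribute names mirror the flags mentioned in the comments; the actual
# implementation may differ.
def check_dataset_update_sketch(args, dataset):
    """Illustrative only: True when any of the update-triggering flags is
    set for an existing dataset.

    """
    return bool(dataset) and bool(
        getattr(args, "dataset_attributes", None) or
        (getattr(args, "multi_label", False) and
         getattr(args, "objective_field", None)) or
        (getattr(args, "max_categories", None) and
         getattr(args, "objective_field", None)))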
def dataset_processing(source, training_set, test_set, fields,
                       objective_field, api, args, resume,
                       name=None, description=None, dataset_fields=None,
                       multi_label_data=None, csv_properties=None,
                       session_file=None, path=None, log=None):
    """Creating or retrieving dataset from input arguments

    """
    datasets = []
    dataset = None
    if training_set or args.source or (args.evaluate and test_set):
        # if resuming, try to extract args.dataset from log files
        if resume:
            message = u.dated("Dataset not found. Resuming.\n")
            resume, args.dataset = c.checkpoint(
                c.is_dataset_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not has_datasets(args) and not has_models(args)
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = r.set_dataset_args(name, description, args, fields,
                                          dataset_fields,
                                          objective_field=objective_field,
                                          multi_label_data=multi_label_data)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If a set of datasets is provided, let's check their ids.
    elif args.dataset_ids:
        for i in range(0, len(args.dataset_ids)):
            datasets.append(bigml.api.get_dataset_id(args.dataset_ids[i]))
        dataset = datasets[0]

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if ('object' in dataset and 'objective_field' in dataset['object']
                and 'column_number' in dataset['object']['objective_field']):
            dataset_objective = dataset[
                'object']['objective_field']['column_number']
            csv_properties.update(objective_field=dataset_objective,
                                  objective_field_present=True)
        fields = get_fields_structure(dataset, csv_properties)
        if args.public_dataset:
            r.publish_dataset(dataset, args, api, session_file)
        new_objective = get_new_objective(fields, args.objective_field,
                                          dataset)
        if (new_objective is not None or args.dataset_attributes or
                (args.shared_flag and
                 r.shared_changed(args.shared, dataset))):
            dataset_args = r.set_dataset_args(name, description, args,
                                              fields, dataset_fields,
                                              objective_field)
            if args.shared_flag and r.shared_changed(args.shared, dataset):
                dataset_args.update(shared=args.shared)
            dataset = r.update_dataset(dataset, dataset_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
            dataset = r.get_dataset(dataset, api, args.verbosity,
                                    session_file)
            csv_properties.update(objective_field=objective_field,
                                  objective_field_present=True)
            fields = Fields(dataset['object']['fields'], **csv_properties)
        if not datasets:
            datasets = [dataset]
        else:
            datasets[0] = dataset
    return datasets, resume, csv_properties, fields
def dataset_processing(source, training_set, test_set, fields,
                       objective_field, api, args, resume,
                       name=None, description=None, dataset_fields=None,
                       multi_label_data=None, csv_properties=None,
                       session_file=None, path=None, log=None):
    """Creating or retrieving dataset from input arguments

    """
    datasets = []
    dataset = None
    if (training_set or args.source or (args.evaluate and test_set)):
        # if resuming, try to extract args.dataset from log files
        if resume:
            message = u.dated("Dataset not found. Resuming.\n")
            resume, args.dataset = c.checkpoint(
                c.is_dataset_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not has_datasets(args) and not has_models(args)
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = r.set_dataset_args(name, description, args, fields,
                                          dataset_fields,
                                          objective_field=objective_field,
                                          multi_label_data=multi_label_data)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If a set of datasets is provided, let's check their ids.
    elif args.dataset_ids:
        for i in range(0, len(args.dataset_ids)):
            datasets.append(bigml.api.get_dataset_id(args.dataset_ids[i]))
        dataset = datasets[0]

    # If we already have a dataset, we check its status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if ('object' in dataset and 'objective_field' in dataset['object']
                and 'column_number' in dataset['object']['objective_field']):
            dataset_objective = dataset[
                'object']['objective_field']['column_number']
            csv_properties.update(objective_field=dataset_objective,
                                  objective_field_present=True)
        fields = get_fields_structure(dataset, csv_properties)
        if args.public_dataset:
            r.publish_dataset(dataset, args, api, session_file)
        new_objective = get_new_objective(fields, args.objective_field,
                                          dataset)
        if (new_objective is not None or args.dataset_attributes or
                r.shared_changed(args.shared, dataset)):
            dataset_args = r.set_dataset_args(name, description, args,
                                              fields, dataset_fields,
                                              objective_field)
            dataset_args.update(shared=args.shared)
            dataset = r.update_dataset(dataset, dataset_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
            dataset = r.get_dataset(dataset, api, args.verbosity,
                                    session_file)
            csv_properties.update(objective_field=objective_field,
                                  objective_field_present=True)
            fields = Fields(dataset['object']['fields'], **csv_properties)
        if not datasets:
            datasets = [dataset]
        else:
            datasets[0] = dataset
    return datasets, resume, csv_properties, fields
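# Hedged sketch (assumption, not the real bigmler helper): get_new_objective
# is used above to decide whether the user-supplied --objective-field would
# actually change the dataset's current objective, returning None when no
# update is needed. A minimal version with that contract, relying only on the
# fields structure and the dataset's objective column number already used in
# the functions above, could look like this; the real helper may resolve
# names and column numbers differently.
def get_new_objective_sketch(fields, objective_field, dataset):
    """Illustrative only: returns objective_field when it names a column
    other than the dataset's current objective, None otherwise.

    """
    if objective_field is None:
        return None
    current_column = dataset.get('object', {}).get(
        'objective_field', {}).get('column_number')
    for field_id in fields.fields:
        field_info = fields.fields[field_id]
        if objective_field in (field_id, field_info.get('name')):
            if field_info.get('column_number') == current_column:
                return None
            return objective_field
    # Unknown name or id: let the caller attempt the update.
    return objective_field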
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None, training_set_header=True,
                   test_set_header=True, model_ids=None,
                   votes_files=None, resume=False, fields_map=None):
    """ Creates one or more models using the `training_set`
        or uses the ids of previously created BigML models to make
        predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not description and
            (args.black_box or args.white_box or args.public_dataset)):
        raise Exception("You should provide a description to publish.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    # Starting source processing
    if (training_set or (args.evaluate and test_set)):
        # If resuming, try to extract args.source from log files
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

        # If neither a previous source, dataset nor model is provided,
        # we create a new one. Also if --evaluate and test data are provided
        # we create a new dataset to test with.
        data_set, data_set_header = r.data_to_source(training_set, test_set,
                                                     training_set_header,
                                                     test_set_header, args)
        if data_set is not None:
            source_args = r.set_source_args(data_set_header, name,
                                            description, args)
            source = r.create_source(data_set, source_args, args, api,
                                     path, session_file, log)

    # If a source is provided either through the command line or in resume
    # steps, we use it.
    elif args.source:
        source = bigml.api.get_source_id(args.source)

    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if source:
        source = r.get_source(source, api, args.verbosity, session_file)
        if 'source_parser' in source['object']:
            source_parser = source['object']['source_parser']
            if 'missing_tokens' in source_parser:
                csv_properties['missing_tokens'] = (
                    source_parser['missing_tokens'])
            if 'data_locale' in source_parser:
                csv_properties['data_locale'] = source_parser['locale']

        fields = Fields(source['object']['fields'], **csv_properties)
        if field_attributes:
            source = r.update_source_fields(source, field_attributes,
                                            fields, api, args.verbosity,
                                            session_file)
        if types:
            source = r.update_source_fields(source, types, fields, api,
                                            args.verbosity, session_file)

    # End of source processing

    # Starting dataset processing
    if (training_set or args.source or (args.evaluate and test_set)):
        # if resuming, try to extract args.dataset from log files
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = r.set_dataset_args(name, description, args, fields,
                                          dataset_fields)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)

    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If we already have a dataset, we check the status and get the fields if
    # we don't have them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if not csv_properties and 'locale' in dataset['object']:
            csv_properties = {'data_locale': dataset['object']['locale']}
        fields = Fields(dataset['object']['fields'], **csv_properties)
        if args.public_dataset:
            r.publish_dataset(dataset, api, args, session_file)

    # end of dataset processing

    # start of model processing
    # If we have a dataset but no model, we create the model if the no_model
    # flag hasn't been set up.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids), args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            models = model_ids
            args.number_of_models -= len(model_ids)

        model_args = r.set_model_args(name, description, args,
                                      objective_field, fields, model_fields)
        models, model_ids = r.create_models(dataset, models, model_args,
                                            args, api, path, session_file,
                                            log)
        model = models[0]
    # If a model is provided, we use it.
    elif args.model:
        model = args.model
        model_ids = [model]
        models = [model]

    elif args.models or args.model_tag:
        models = model_ids[:]
        model = models[0]

    # If we are going to predict we must retrieve the models
    if model_ids and test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)
        model = models[0]

    # We get the fields of the model if we haven't got
    # them yet and update its public state if needed
    if (model and not args.evaluate and (test_set or args.black_box
                                         or args.white_box)):
        if args.black_box or args.white_box:
            model = r.publish_model(model, args, api, session_file)
            models[0] = model
        if not csv_properties:
            csv_properties = {}
        csv_properties.update(verbose=True)
        if args.user_locale is None:
            args.user_locale = model['object'].get('locale', None)
        csv_properties.update(data_locale=args.user_locale)
        if 'model_fields' in model['object']['model']:
            model_fields = model['object']['model']['model_fields'].keys()
            csv_properties.update(include=model_fields)
        if 'missing_tokens' in model['object']['model']:
            missing_tokens = model['object']['model']['missing_tokens']
        else:
            missing_tokens = MISSING_TOKENS
        csv_properties.update(missing_tokens=missing_tokens)
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        csv_properties.update(objective_field=objective_field)
        fields = Fields(model['object']['model']['fields'], **csv_properties)

    # end of model processing

    # If predicting
    if models and test_set and not args.evaluate:
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        try:
            model = api.check_resource(model_id, api.get_model)
        except ValueError as exception:
            sys.exit("Failed to get model %s: %s" % (model_id,
                                                     str(exception)))
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)
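# Worked example (made-up file name) of the model id extraction used in the
# votes_files branch above: the regular expression keeps the trailing
# `model_<24 hex digits>` chunk of the predictions file name and the
# underscore replacement turns it back into a BigML resource id.
def example_model_id_from_votes_file():
    """Illustrative only: returns 'model/0123456789abcdef01234567'.

    """
    import re

    votes_file = "dir1/model_0123456789abcdef01234567__predictions.csv"
    return re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                  r'\1', votes_file).replace("_", "/")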