def models_processing(datasets, models, model_ids, objective_field, fields,
                      api, args, resume, name=None, description=None,
                      model_fields=None, session_file=None, path=None,
                      log=None, labels=None, multi_label_data=None,
                      other_label=None):
    """Creates or retrieves models from the input data

    """
    ensemble_ids = []

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if datasets and not (has_models(args) or args.no_model):
        dataset = datasets[0]
        model_ids = []
        models = []
        if args.multi_label:
            # If --number-of-models is not set or is 1, create one model per
            # label. Otherwise, create one ensemble per label with the
            # required number of models.
            if args.number_of_models < 2:
                models, model_ids, resume = model_per_label(
                    labels, datasets, fields, objective_field, api, args,
                    resume, name, description, model_fields,
                    multi_label_data, session_file, path, log)
            else:
                (ensembles, ensemble_ids,
                 models, model_ids, resume) = ensemble_per_label(
                     labels, dataset, fields, objective_field, api, args,
                     resume, name, description, model_fields,
                     multi_label_data, session_file, path, log)
        elif args.number_of_models > 1:
            ensembles = []
            # Ensemble of models
            (ensembles, ensemble_ids,
             models, model_ids, resume) = ensemble_processing(
                 datasets, objective_field, fields, api, args, resume,
                 name=name, description=description,
                 model_fields=model_fields, session_file=session_file,
                 path=path, log=log)
            ensemble = ensembles[0]
            args.ensemble = bigml.api.get_ensemble_id(ensemble)
        else:
            # Set of partial datasets created setting args.max_categories
            if len(datasets) > 1 and args.max_categories:
                args.number_of_models = len(datasets)
            # Cross-validation case: we create 2 * n models to be validated
            # holding out an n% of data
            if args.cross_validation_rate > 0:
                if args.number_of_evaluations > 0:
                    args.number_of_models = args.number_of_evaluations
                else:
                    args.number_of_models = int(MONTECARLO_FACTOR *
                                                args.cross_validation_rate)
            if resume:
                resume, model_ids = c.checkpoint(
                    c.are_models_created, path, args.number_of_models,
                    debug=args.debug)
                if not resume:
                    message = u.dated(u"Found %s models out of %s. Resuming.\n"
                                      % (len(model_ids),
                                         args.number_of_models))
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)

            models = model_ids
            args.number_of_models -= len(model_ids)
            if args.max_categories > 0:
                objective_field = None

            model_args = r.set_model_args(name, description, args,
                                          objective_field, fields,
                                          model_fields, other_label)
            models, model_ids = r.create_models(datasets, models, model_args,
                                                args, api, path,
                                                session_file, log)
    # If a model is provided, we use it.
    elif args.model:
        model_ids = [args.model]
        models = model_ids[:]

    elif args.models or args.model_tag:
        models = model_ids[:]

    if args.ensemble:
        ensemble = r.get_ensemble(args.ensemble, api, args.verbosity,
                                  session_file)
        ensemble_ids = [ensemble]
        model_ids = ensemble['object']['models']

        models = model_ids[:]

    if args.ensembles or args.ensemble_tag:
        model_ids = []
        ensemble_ids = []
        # Parses ensemble/ids if provided.
        if args.ensemble_tag:
            ensemble_ids = (ensemble_ids +
                            u.list_ids(api.list_ensembles,
                                       "tags__in=%s" % args.ensemble_tag))
        else:
            ensemble_ids = u.read_resources(args.ensembles)
        for ensemble_id in ensemble_ids:
            ensemble = r.get_ensemble(ensemble_id, api)
            if args.ensemble is None:
                args.ensemble = ensemble_id
            model_ids.extend(ensemble['object']['models'])
        models = model_ids[:]

    # If we are going to predict we must retrieve the models
    if model_ids and args.test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)

    return models, model_ids, ensemble_ids, resume
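# ---------------------------------------------------------------------------
# The cross-validation branch above sizes the model set from the holdout
# rate: 2 * n models are built when an n% holdout is requested. A minimal
# worked sketch of that arithmetic follows; the factor value of 200 is an
# assumption for illustration only (the real MONTECARLO_FACTOR constant is
# defined elsewhere in the module), and the variable names are placeholders.
MONTECARLO_FACTOR_EXAMPLE = 200

cross_validation_rate = 0.1      # hold out 10% of the data in each split
number_of_evaluations = 0        # not set, so the factor-based count is used

if number_of_evaluations > 0:
    number_of_models = number_of_evaluations
else:
    number_of_models = int(MONTECARLO_FACTOR_EXAMPLE * cross_validation_rate)

print(number_of_models)          # -> 20, i.e. 2 * n models for an n% holdout
# ---------------------------------------------------------------------------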
def models_processing(dataset, models, model_ids, name, description,
                      test_set, objective_field, fields, model_fields, api,
                      args, resume, session_file=None, path=None, log=None):
    """Creates or retrieves models from the input data

    """
    log_models = False
    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if (dataset and not args.model and not model_ids and not args.no_model
            and not args.ensemble):
        model_ids = []
        models = []
        if args.number_of_models > 1:
            # Ensemble of models
            ensemble, resume = ensemble_processing(
                dataset, name, description, objective_field, fields, api,
                args, resume, session_file=session_file, path=path, log=log)
            args.ensemble = bigml.api.get_ensemble_id(ensemble)
            log_models = True
        else:
            # Cross-validation case: we create 2 * n models to be validated
            # holding out an n% of data
            if args.cross_validation_rate > 0:
                if args.number_of_evaluations > 0:
                    args.number_of_models = args.number_of_evaluations
                else:
                    args.number_of_models = int(MONTECARLO_FACTOR *
                                                args.cross_validation_rate)
            if resume:
                resume, model_ids = c.checkpoint(
                    c.are_models_created, path, args.number_of_models,
                    debug=args.debug)
                if not resume:
                    message = u.dated("Found %s models out of %s. Resuming.\n"
                                      % (len(model_ids),
                                         args.number_of_models))
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)

            models = model_ids
            args.number_of_models -= len(model_ids)

            model_args = r.set_model_args(name, description, args,
                                          objective_field, fields,
                                          model_fields)
            models, model_ids = r.create_models(dataset, models, model_args,
                                                args, api, path,
                                                session_file, log)
    # If a model is provided, we use it.
    elif args.model:
        model_ids = [args.model]
        models = model_ids[:]

    elif args.models or args.model_tag:
        models = model_ids[:]

    if args.ensemble:
        ensemble = r.get_ensemble(args.ensemble, api, args.verbosity,
                                  session_file)
        model_ids = ensemble['object']['models']
        if log_models:
            for model_id in model_ids:
                u.log_created_resources("models", path, model_id,
                                        open_mode='a')

        models = model_ids[:]

    # If we are going to predict we must retrieve the models
    if model_ids and test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)

    return models, model_ids, resume
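# ---------------------------------------------------------------------------
# The resume branch above relies on a checkpoint of previously created
# resources: whatever models are already logged under `path` are kept and
# only the remaining ones are created. A minimal sketch of that bookkeeping,
# with hypothetical numbers and made-up model ids (the c.checkpoint helper
# itself belongs to bigmler and is not reimplemented here):
requested_models = 10
found_model_ids = ["model/52a0c7f4035d0741fa000001",   # already created in a
                   "model/52a0c7f4035d0741fa000002",   # previous, interrupted
                   "model/52a0c7f4035d0741fa000003",   # run
                   "model/52a0c7f4035d0741fa000004"]

models = found_model_ids                       # reuse the existing models
remaining = requested_models - len(found_model_ids)
print(remaining)                               # -> 6 models left to create
# ---------------------------------------------------------------------------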
def models_processing(datasets, models, model_ids, api, args, resume,
                      fields=None, session_file=None, path=None, log=None,
                      labels=None, multi_label_data=None, other_label=None):
    """Creates or retrieves models from the input data

    """
    ensemble_ids = []

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if datasets and not (args.has_models_ or args.no_model):
        dataset = datasets[0]
        model_ids = []
        models = []
        if args.multi_label:
            # If --number-of-models is not set or is 1, and there's no
            # boosting option on, create one model per label. Otherwise,
            # create one ensemble per label with the required number of
            # models.
            if args.number_of_models < 2 and not args.boosting:
                models, model_ids, resume = model_per_label(
                    labels, datasets, api, args, resume, fields=fields,
                    multi_label_data=multi_label_data,
                    session_file=session_file, path=path, log=log)
            else:
                (ensembles, ensemble_ids,
                 models, model_ids, resume) = ensemble_per_label(
                     labels, dataset, api, args, resume, fields=fields,
                     multi_label_data=multi_label_data,
                     session_file=session_file, path=path, log=log)
        elif args.number_of_models > 1 or args.boosting:
            ensembles = []
            # Ensembles of models
            (ensembles, ensemble_ids,
             models, model_ids, resume) = ensemble_processing(
                 datasets, api, args, resume, fields=fields,
                 session_file=session_file, path=path, log=log)
            ensemble = ensembles[0]
            args.ensemble = bigml.api.get_ensemble_id(ensemble)
        else:
            # Set of partial datasets created setting args.max_categories
            if len(datasets) > 1 and args.max_categories:
                args.number_of_models = len(datasets)
            if ((args.test_datasets and args.evaluate) or
                    (args.datasets and args.evaluate and args.dataset_off)):
                args.number_of_models = len(args.dataset_ids)
            # Cross-validation case: we create 2 * n models to be validated
            # holding out an n% of data
            if args.cross_validation_rate > 0:
                if args.number_of_evaluations > 0:
                    args.number_of_models = args.number_of_evaluations
                else:
                    args.number_of_models = int(MONTECARLO_FACTOR *
                                                args.cross_validation_rate)
            if resume:
                resume, model_ids = c.checkpoint(
                    c.are_models_created, path, args.number_of_models,
                    debug=args.debug)
                if not resume:
                    message = u.dated("Found %s models out of %s. Resuming.\n"
                                      % (len(model_ids),
                                         args.number_of_models))
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)

            models = model_ids
            args.number_of_models -= len(model_ids)

            model_args = r.set_model_args(args, fields=fields,
                                          objective_id=args.objective_id_,
                                          model_fields=args.model_fields_,
                                          other_label=other_label)
            models, model_ids = r.create_models(datasets, models, model_args,
                                                args, api, path,
                                                session_file, log)
    # If a model is provided, we use it.
    elif args.model:
        model_ids = [args.model]
        models = model_ids[:]

    elif args.models or args.model_tag:
        models = model_ids[:]

    if args.ensemble:
        if args.ensemble not in ensemble_ids:
            ensemble_ids.append(args.ensemble)
        if not args.evaluate:
            ensemble = r.get_ensemble(args.ensemble, api, args.verbosity,
                                      session_file)
            model_ids = ensemble['object']['models']
            models = model_ids[:]

    if args.ensembles or args.ensemble_tag:
        model_ids = []
        ensemble_ids = []
        # Parses ensemble/ids if provided.
        if args.ensemble_tag:
            ensemble_ids = (ensemble_ids +
                            u.list_ids(api.list_ensembles,
                                       "tags__in=%s" % args.ensemble_tag))
        else:
            ensemble_ids = u.read_resources(args.ensembles)
        for ensemble_id in ensemble_ids:
            ensemble = r.get_ensemble(ensemble_id, api)
            if args.ensemble is None:
                args.ensemble = ensemble_id
            model_ids.extend(ensemble['object']['models'])
        models = model_ids[:]

    # If we are going to predict we must retrieve the models
    if model_ids and args.test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)

    return models, model_ids, ensemble_ids, resume
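# ---------------------------------------------------------------------------
# Hypothetical usage sketch for the models_processing version above (not part
# of the original source). The parameters stand in for objects produced by
# the earlier steps of the bigmler pipeline: `datasets` is a list of dataset
# resources or ids, `args` the parsed command-line namespace, `api` an
# authenticated BigML connection and `fields` a Fields instance.
def _models_processing_usage_sketch(datasets, fields, api, args, resume,
                                    session_file, path, log):
    models, model_ids, ensemble_ids, resume = models_processing(
        datasets, [], [], api, args, resume,
        fields=fields, session_file=session_file, path=path, log=log)
    # model_ids holds the ids of the created or retrieved models;
    # ensemble_ids is non-empty when ensembles (or boosting) were involved.
    return models, model_ids, ensemble_ids, resume
# ---------------------------------------------------------------------------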
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None, description=None,
                   field_attributes=None, types=None, dataset_fields=None,
                   model_fields=None, name=None, training_set_header=True,
                   test_set_header=True, model_ids=None, votes_files=None,
                   resume=False, fields_map=None):
    """ Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the
    `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not description and
            (args.black_box or args.white_box or args.public_dataset)):
        raise Exception("You should provide a description to publish.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    # Starting source processing

    if (training_set or (args.evaluate and test_set)):
        # If resuming, try to extract args.source from log files
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If neither a previous source, dataset nor model is provided,
    # we create a new one. Also if --evaluate and test data are provided
    # we create a new dataset to test with.
    data_set, data_set_header = r.data_to_source(training_set, test_set,
                                                 training_set_header,
                                                 test_set_header, args)
    if data_set is not None:
        source_args = r.set_source_args(data_set_header, name, description,
                                        args)
        source = r.create_source(data_set, source_args, args, api,
                                 path, session_file, log)
    # If a source is provided either through the command line or in resume
    # steps, we use it.
    elif args.source:
        source = bigml.api.get_source_id(args.source)

    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if source:
        source = r.get_source(source, api, args.verbosity, session_file)
        if 'source_parser' in source['object']:
            source_parser = source['object']['source_parser']
            if 'missing_tokens' in source_parser:
                csv_properties['missing_tokens'] = (
                    source_parser['missing_tokens'])
            if 'locale' in source_parser:
                csv_properties['data_locale'] = source_parser['locale']

        fields = Fields(source['object']['fields'], **csv_properties)
        if field_attributes:
            source = r.update_source_fields(source, field_attributes, fields,
                                            api, args.verbosity,
                                            session_file)
        if types:
            source = r.update_source_fields(source, types, fields, api,
                                            args.verbosity, session_file)
    # End of source processing

    # Starting dataset processing

    if (training_set or args.source or (args.evaluate and test_set)):
        # If resuming, try to extract args.dataset from log files
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = r.set_dataset_args(name, description, args, fields,
                                          dataset_fields)
        dataset = r.create_dataset(source, dataset_args, args, api, path,
                                   session_file, log)
    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        dataset = bigml.api.get_dataset_id(args.dataset)

    # If we already have a dataset, we check the status and get the fields
    # if we haven't got them yet.
    if dataset:
        dataset = r.get_dataset(dataset, api, args.verbosity, session_file)
        if not csv_properties and 'locale' in dataset['object']:
            csv_properties = {
                'data_locale': dataset['object']['locale']}
        fields = Fields(dataset['object']['fields'], **csv_properties)
        if args.public_dataset:
            r.publish_dataset(dataset, api, args, session_file)
    # end of dataset processing

    # start of model processing

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids), args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

        models = model_ids
        args.number_of_models -= len(model_ids)

        model_args = r.set_model_args(name, description, args,
                                      objective_field, fields, model_fields)
        models, model_ids = r.create_models(dataset, models, model_args,
                                            args, api, path, session_file,
                                            log)
        model = models[0]
    # If a model is provided, we use it.
    elif args.model:
        model = args.model
        model_ids = [model]
        models = [model]

    elif args.models or args.model_tag:
        models = model_ids[:]
        model = models[0]

    # If we are going to predict we must retrieve the models
    if model_ids and test_set and not args.evaluate:
        models, model_ids = r.get_models(models, args, api, session_file)
        model = models[0]

    # We get the fields of the model if we haven't got them yet and update
    # its public state if needed
    if model and not args.evaluate and (test_set or args.black_box
                                        or args.white_box):
        if args.black_box or args.white_box:
            model = r.publish_model(model, args, api, session_file)
            models[0] = model
        if not csv_properties:
            csv_properties = {}
        csv_properties.update(verbose=True)
        if args.user_locale is None:
            args.user_locale = model['object'].get('locale', None)
        csv_properties.update(data_locale=args.user_locale)
        if 'model_fields' in model['object']['model']:
            model_fields = model['object']['model']['model_fields'].keys()
            csv_properties.update(include=model_fields)
        if 'missing_tokens' in model['object']['model']:
            missing_tokens = model['object']['model']['missing_tokens']
        else:
            missing_tokens = MISSING_TOKENS
        csv_properties.update(missing_tokens=missing_tokens)
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        csv_properties.update(objective_field=objective_field)

        fields = Fields(model['object']['model']['fields'], **csv_properties)
    # end of model processing

    # If predicting
    if models and test_set and not args.evaluate:
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        try:
            model = api.check_resource(model_id, api.get_model)
        except ValueError as exception:
            sys.exit("Failed to get model %s: %s" % (model_id,
                                                     str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)
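# ---------------------------------------------------------------------------
# The vote-combining step above recovers the model id from each predictions
# file name before fetching the model. A small self-contained sketch of that
# extraction, using a made-up file name for illustration:
import re

votes_file = "dir1/model_4f66a80d035d07305a00004f__predictions.csv"

# Same transformation as in compute_output: keep the "model_<24 hex chars>"
# part of the file name and turn its underscore into the "model/<id>"
# resource id form expected by the API.
model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                  r'\1', votes_file).replace("_", "/")
print(model_id)  # -> "model/4f66a80d035d07305a00004f"
# ---------------------------------------------------------------------------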