def get_samples(sample_ids, args, api=None, session_file=None,
                query_string=''):
    """Retrieves remote samples in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    sample_id = ""
    samples = sample_ids
    sample_id = sample_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("sample", len(sample_ids)),
                     get_url(sample_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one sample to predict at present
    try:
        sample = api.get_sample(sample_ids[0], query_string=query_string)
        check_resource_error(sample, "Failed to retrieve sample: %s" %
                             sample['resource'])
        sample = check_resource(sample, api=api, query_string=query_string,
                                raise_on_error=True)
    except Exception, exception:
        sys.exit("Failed to get a finished sample: %s" % str(exception))
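# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how get_samples can be driven by hand. It
# assumes the usual bigmler setup: BigML credentials in the environment
# and a Namespace carrying the flags the function reads. The sample id
# below is hypothetical, for illustration only.
def _example_get_samples():
    import argparse
    args = argparse.Namespace(verbosity=1)
    api = bigml.api.BigML()  # picks up BIGML_USERNAME / BIGML_API_KEY
    get_samples(["sample/5331f7e835203f5ac30004ed"], args, api=api)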
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (ALL_FIELDS_QS if not single_model and
                                (len(models) == 0 or args.multi_label)
                                else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            except ValueError, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
def get_time_series(time_series_ids, args, api=None, session_file=None):
    """Retrieves remote time-series in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    time_series_id = ""
    time_series_set = time_series_ids
    time_series_id = time_series_ids[0]
    message = dated(
        "Retrieving %s. %s\n" % (plural("time-series",
                                        len(time_series_ids)),
                                 get_url(time_series_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one time-series to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        time_series = check_resource(time_series_ids[0],
                                     api.get_time_series,
                                     query_string=query_string,
                                     raise_on_error=True)
    except Exception, exception:
        sys.exit("Failed to get a finished time-series: %s" %
                 str(exception))
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1, api=None, path=None,
                     session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            if args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset,
                                               ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" % get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids
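# --- Illustration (not part of the original module) ---
# The --dataset_off/--evaluate branch above builds one ensemble per held-out
# dataset: copy the full dataset list, then delete the i-th entry (offset by
# the ensembles already created on a resumed run). A self-contained sketch:
def _example_leave_one_out():
    test_dataset_ids = ["dataset/a", "dataset/b", "dataset/c"]
    existing = 0
    for i in range(len(test_dataset_ids)):
        multi_dataset = test_dataset_ids[:]
        del multi_dataset[i + existing]
        print(multi_dataset)
    # prints ['dataset/b', 'dataset/c'], ['dataset/a', 'dataset/c'],
    # ['dataset/a', 'dataset/b']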
def get_logistic_regressions(logistic_regression_ids, args, api=None,
                             session_file=None):
    """Retrieves remote logistic regressions in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regression_id = ""
    logistic_regressions = logistic_regression_ids
    logistic_regression_id = logistic_regression_ids[0]
    message = dated(
        "Retrieving %s. %s\n" % (plural("logistic regression",
                                        len(logistic_regression_ids)),
                                 get_url(logistic_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one logistic regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        logistic_regression = check_resource(
            logistic_regression_ids[0], api.get_logistic_regression,
            query_string=query_string, raise_on_error=True)
    except Exception, exception:
        sys.exit("Failed to get a finished logistic regression: %s" %
                 str(exception))
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    if len(model_ids) == 1:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                model = check_resource(model, api.get_model,
                                       query_string=FIELDS_QS)
            except ValueError, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (
                    ALL_FIELDS_QS if (
                        (not single_model and
                         (not models or args.multi_label)) or
                        not args.test_header)
                    else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string,
                                       raise_on_error=True)
            except Exception, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
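# --- Illustration (not part of the original module) ---
# The query string decides how much of each model is downloaded: the first
# model of a batch (or every model under --multi_label, or any model when
# there is no test header) needs the full field structure (ALL_FIELDS_QS);
# the rest can use the lighter FIELDS_QS. The same predicate in isolation:
def _example_query_string_choice(single_model, models_retrieved,
                                 multi_label, test_header):
    return ("ALL_FIELDS_QS" if ((not single_model and
                                 (models_retrieved == 0 or multi_label)) or
                                not test_header)
            else "FIELDS_QS")
# _example_query_string_choice(False, 0, False, True) -> 'ALL_FIELDS_QS'
# _example_query_string_choice(False, 3, False, True) -> 'FIELDS_QS'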
def create_samples(datasets, sample_ids, sample_args, args,
                   api=None, path=None, session_file=None, log=None):
    """Create remote samples

    """
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created,
    # there will be no datasets left
    if datasets:
        if isinstance(sample_args, list):
            sample_args_list = sample_args
        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" %
                        plural("sample", number_of_samples))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_samples):
            wait_for_available_tasks(inprogress, max_parallel_samples,
                                     api, "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]
            sample = api.create_sample(datasets[i], sample_args,
                                       retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample, api.get_sample,
                                            raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished sample: %s" %
                             str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" % get_url(sample))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)
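# --- Illustration (not part of the original module) ---
# All the create_* helpers share the same resume idiom: ids recovered from a
# previous run arrive in the *_ids argument, and slicing the dataset list by
# that count leaves only the datasets that still lack a resource.
def _example_resume_slice():
    datasets = ["dataset/1", "dataset/2", "dataset/3"]
    recovered_ids = ["sample/a"]          # one sample already created
    pending = datasets[len(recovered_ids):]
    print(pending)                        # ['dataset/2', 'dataset/3']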
def create_models(dataset, model_ids, model_args, args, api=None,
                  path=None, session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if isinstance(model_args, list):
        model_args_list = model_args
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)

        single_model = args.number_of_models == 1 and existing_models == 0
        # if there's more than one model the first one must contain
        # the entire field structure to be used as reference.
        query_string = (FIELDS_QS if single_model else ALL_FIELDS_QS)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if model_args_list:
                model_args = model_args_list[i]
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            model_id = check_resource_error(model,
                                            "Failed to create model: ")
            log_message("%s\n" % model_id, log_file=log)
            model_ids.append(model_id)
            models.append(model)
            log_created_resources("models", path, model_id,
                                  open_mode='a')

        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
def create_models(dataset, model_ids, model_args, args, api=None,
                  path=None, session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(models[i - 1],
                                                   api.get_model,
                                                   query_string=FIELDS_QS)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models", path,
                                  bigml.api.get_model_id(model),
                                  open_mode='a')
            check_resource_error(
                model, "Failed to create model %s:" % model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError, exception:
                    sys.exit("Failed to get a finished model: %s" %
                             str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
def create_clusters(datasets, cluster_ids, cluster_args, args,
                    api=None, path=None, session_file=None, log=None):
    """Create remote clusters

    """
    if api is None:
        api = bigml.api.BigML()
    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created,
    # there will be no datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args
        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]
            cluster = api.create_cluster(datasets, cluster_args,
                                         retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string,
                                             raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished cluster: %s" %
                             str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" % get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)
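# --- Usage sketch (not part of the original module) ---
# Driving create_clusters by hand. The Namespace fields mirror the flags the
# function reads; the dataset id and values shown are assumptions for
# illustration. Note that cluster_ids is filled in place.
def _example_create_clusters():
    import argparse
    args = argparse.Namespace(verbosity=1, max_parallel_clusters=1,
                              reports=None)
    cluster_ids = []
    create_clusters(["dataset/5331f7e835203f5ac30004ec"], cluster_ids,
                    {"name": "my cluster"}, args, api=bigml.api.BigML())
    # cluster_ids now holds the new cluster/... id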
def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1, api=None, path=None,
                     session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api.get_ensemble, "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" % get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids
def create_ensembles(dataset, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1, api=None, path=None,
                     session_file=None, log=None):
    """Create ensembles from input data

    """
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = ALL_FIELDS_QS
        for i in range(0, number_of_ensembles):
            if i % args.max_parallel_ensembles == 0 and i > 0:
                try:
                    ensembles[i - 1] = check_resource(
                        ensembles[i - 1], api.get_ensemble,
                        query_string=query_string)
                except ValueError, exception:
                    sys.exit("Failed to get a finished ensemble: %s" %
                             str(exception))
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(dataset, ensemble_args)
            ensemble_id = check_resource_error(ensemble,
                                               "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" % get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
def get_clusters(cluster_ids, args, api=None, session_file=None):
    """Retrieves remote clusters in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    cluster_id = ""
    clusters = cluster_ids
    cluster_id = cluster_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("cluster", len(cluster_ids)),
                     get_url(cluster_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one cluster to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields else ALL_FIELDS_QS
        cluster = check_resource(cluster_ids[0], api.get_cluster,
                                 query_string=query_string,
                                 raise_on_error=True)
    except Exception, exception:
        sys.exit("Failed to get a finished cluster: %s" % str(exception))
def get_associations(association_ids, args, api=None, session_file=None):
    """Retrieves remote associations in their current status

    """
    if api is None:
        api = bigml.api.BigML()
    association_id = ""
    associations = association_ids
    association_id = association_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("association", len(association_ids)),
                     get_url(association_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one association to predict at present
    try:
        query_string = FIELDS_QS
        association = check_resource(association_ids[0],
                                     api.get_association,
                                     query_string=query_string,
                                     raise_on_error=True)
    except Exception, exception:
        sys.exit("Failed to get a finished association: %s" %
                 str(exception))
def create_topic_models(datasets, topic_model_ids, topic_model_args,
                        args, api=None, path=None,
                        session_file=None, log=None):
    """Create remote topic models

    """
    if api is None:
        api = bigml.api.BigML()
    topic_models = topic_model_ids[:]
    existing_topic_models = len(topic_models)
    topic_model_args_list = []
    datasets = datasets[existing_topic_models:]
    # if resuming and all topic models were created, there will
    # be no datasets left
    if datasets:
        if isinstance(topic_model_args, list):
            topic_model_args_list = topic_model_args
        # Only one topic model per command, at present
        number_of_topic_models = 1
        message = dated("Creating %s.\n" %
                        plural("topic model", number_of_topic_models))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_topic_models):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_topic_models,
                                     api, "topicmodel")
            if topic_model_args_list:
                topic_model_args = topic_model_args_list[i]
            topic_model = api.create_topic_model(datasets,
                                                 topic_model_args,
                                                 retries=None)
            topic_model_id = check_resource_error(
                topic_model, "Failed to create topic model: ")
            log_message("%s\n" % topic_model_id, log_file=log)
            topic_model_ids.append(topic_model_id)
            inprogress.append(topic_model_id)
            topic_models.append(topic_model)
            log_created_resources("topic_models", path, topic_model_id,
                                  mode='a')
        if args.verbosity:
            if bigml.api.get_status(topic_model)['code'] != \
                    bigml.api.FINISHED:
                try:
                    topic_model = check_resource(
                        topic_model, api.get_topic_model,
                        query_string=query_string, raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished topic model: %s" %
                             str(exception))
                topic_models[0] = topic_model
            message = dated("Topic model created: %s\n" %
                            get_url(topic_model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, topic_model)
def create_anomalies(datasets, anomaly_ids, anomaly_args, args,
                     api=None, path=None, session_file=None, log=None):
    """Create remote anomalies

    """
    if api is None:
        api = bigml.api.BigML()
    anomalies = anomaly_ids[:]
    existing_anomalies = len(anomalies)
    anomaly_args_list = []
    datasets = datasets[existing_anomalies:]
    # if resuming and all anomalies were created,
    # there will be no datasets left
    if datasets:
        if isinstance(anomaly_args, list):
            anomaly_args_list = anomaly_args
        # Only one anomaly per command, at present
        number_of_anomalies = 1
        message = dated("Creating %s.\n" %
                        plural("anomaly detector", number_of_anomalies))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_anomalies):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_anomalies,
                                     api, "anomaly")
            if anomaly_args_list:
                anomaly_args = anomaly_args_list[i]
            anomaly = api.create_anomaly(datasets, anomaly_args,
                                         retries=None)
            anomaly_id = check_resource_error(anomaly,
                                              "Failed to create anomaly: ")
            log_message("%s\n" % anomaly_id, log_file=log)
            anomaly_ids.append(anomaly_id)
            inprogress.append(anomaly_id)
            anomalies.append(anomaly)
            log_created_resources("anomalies", path, anomaly_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(anomaly)['code'] != bigml.api.FINISHED:
                try:
                    anomaly = check_resource(anomaly, api.get_anomaly,
                                             query_string=query_string,
                                             raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished anomaly: %s" %
                             str(exception))
                anomalies[0] = anomaly
            message = dated("Anomaly created: %s\n" % get_url(anomaly))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, anomaly)
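# --- Illustration (not part of the original module) ---
# The post-creation check above is the usual bigml idiom: a resource is
# usable once its status code reaches bigml.api.FINISHED; until then
# check_resource polls the API and returns the refreshed resource dict.
# The same pattern factored out (mirrors the calls in the document):
def _example_wait_until_finished(api, anomaly):
    if bigml.api.get_status(anomaly)['code'] != bigml.api.FINISHED:
        anomaly = check_resource(anomaly, api.get_anomaly,
                                 raise_on_error=True)
    return anomaly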
def create_logistic_regressions(datasets, logistic_regression_ids,
                                logistic_regression_args, args,
                                api=None, path=None,
                                session_file=None, log=None):
    """Create remote logistic regressions

    """
    if api is None:
        api = bigml.api.BigML()
    logistic_regressions = logistic_regression_ids[:]
    existing_logistic_regressions = len(logistic_regressions)
    logistic_regression_args_list = []
    datasets = datasets[existing_logistic_regressions:]
    # if resuming and all logistic regressions were created,
    # there will be no datasets left
    if datasets:
        if isinstance(logistic_regression_args, list):
            logistic_regression_args_list = logistic_regression_args
        # Only one logistic regression per command, at present
        number_of_logistic_regressions = 1
        message = dated(
            "Creating %s.\n" % plural("logistic regression",
                                      number_of_logistic_regressions))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_logistic_regressions):
            wait_for_available_tasks(
                inprogress, args.max_parallel_logistic_regressions,
                api, "logisticregression")
            if logistic_regression_args_list:
                logistic_regression_args = logistic_regression_args_list[i]
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(
                    i + existing_logistic_regressions)
                logistic_regression_args.update(seed=new_seed)
            if (args.test_datasets and args.evaluate):
                dataset = datasets[i]
                logistic_regression = api.create_logistic_regression(
                    dataset, logistic_regression_args, retries=None)
            elif args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_logistic_regressions]
                logistic_regression = api.create_logistic_regression(
                    multi_dataset, logistic_regression_args, retries=None)
            else:
                logistic_regression = api.create_logistic_regression(
                    datasets, logistic_regression_args, retries=None)
            logistic_regression_id = check_resource_error(
                logistic_regression,
                "Failed to create logistic regression: ")
            log_message("%s\n" % logistic_regression_id, log_file=log)
            logistic_regression_ids.append(logistic_regression_id)
            inprogress.append(logistic_regression_id)
            logistic_regressions.append(logistic_regression)
            log_created_resources("logistic_regressions", path,
                                  logistic_regression_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(logistic_regression)['code'] != \
                    bigml.api.FINISHED:
                try:
                    logistic_regression = check_resource(
                        logistic_regression, api.get_logistic_regression,
                        query_string=query_string, raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished logistic regression:"
                             " %s" % str(exception))
                logistic_regressions[0] = logistic_regression
            message = dated("Logistic regression created: %s\n" %
                            get_url(logistic_regression))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, logistic_regression)
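# --- Illustration (not part of the original module) ---
# Under --cross_validation_rate each iteration gets a deterministic seed so
# runs are reproducible; get_basic_seed derives it from the running index,
# with already-existing resources counted so a resumed run continues the
# same sequence. Sketch with a stand-in for get_basic_seed:
def _example_seed_sequence(number_of_resources, existing):
    def basic_seed(order):          # stand-in, for illustration only
        return "seed %s" % order
    return [basic_seed(i + existing) for i in range(number_of_resources)]
# _example_seed_sequence(2, 1) -> ['seed 1', 'seed 2']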
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None, description=None,
                   field_attributes=None, types=None, dataset_fields=None,
                   model_fields=None, name=None, training_set_header=True,
                   test_set_header=True, model_ids=None, votes_files=None,
                   resume=False, fields_map=None):
    """ Creates one or more models using the `training_set`
        or uses the ids of previously created BigML models to make
        predictions for the `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    if (training_set or (args.evaluate and test_set)):
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If no previous source, dataset or model is provided, we create a new
    # one. Also if --evaluate and test data are provided we create a new
    # dataset to test with.
    data_set = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    if not data_set is None:
        source_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag,
            "source_parser": {"header": data_set_header}}
        message = u.dated("Creating source.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.create_source(data_set, source_args,
                                   progress_bar=args.progress_bar)
        source = api.check_resource(source, api.get_source)
        message = u.dated("Source created: %s\n" % u.get_url(source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.log_message("%s\n" % source['resource'], log_file=log)

        fields = Fields(source['object']['fields'],
                        source['object']['source_parser']['missing_tokens'],
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()
    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.check_resource(source, api.get_source)
        csv_properties = {
            'missing_tokens':
                source['object']['source_parser']['missing_tokens'],
            'data_locale': source['object']['source_parser']['locale']}

        fields = Fields(source['object']['fields'], **csv_properties)
        update_fields = {}
        if field_attributes:
            for (column, value) in field_attributes.iteritems():
                update_fields.update({
                    fields.field_id(column): value})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

        update_fields = {}
        if types:
            for (column, value) in types.iteritems():
                update_fields.update({
                    fields.field_id(column): {'optype': value}})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

    if (training_set or args.source or (args.evaluate and test_set)):
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                bigml.api,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids
            and not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }

        if args.json_filter:
            dataset_args.update(json_filter=args.json_filter)
        elif args.lisp_filter:
            dataset_args.update(lisp_filter=args.lisp_filter)

        input_fields = []
        if dataset_fields:
            for name in dataset_fields:
                input_fields.append(fields.field_id(name))
            dataset_args.update(input_fields=input_fields)
        message = u.dated("Creating dataset.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        dataset = api.create_dataset(source, dataset_args)
        dataset = api.check_resource(dataset, api.get_dataset)
        message = u.dated("Dataset created: %s\n" %
                          u.get_url(dataset, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.log_message("%s\n" % dataset['resource'], log_file=log)
        dataset_file = open(path + '/dataset', 'w', 0)
        dataset_file.write("%s\n" % dataset['resource'])
        dataset_file.flush()
        dataset_file.close()
    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        message = u.dated("Retrieving dataset. %s\n" %
                          u.get_url(args.dataset, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        dataset = api.get_dataset(args.dataset)

    # If we already have a dataset, we check the status and get the fields
    # if we haven't got them yet.
    if dataset:
        if dataset['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.check_resource(dataset, api.get_dataset)
        if not csv_properties:
            csv_properties = {'data_locale':
                              dataset['object']['locale']}
        if args.public_dataset:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            public_dataset = {"private": False}
            if args.dataset_price:
                message = u.dated("Updating dataset. %s\n" %
                                  u.get_url(dataset, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_dataset.update(price=args.dataset_price)
            message = u.dated("Updating dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.update_dataset(dataset, public_dataset)
        fields = Fields(dataset['object']['fields'], **csv_properties)

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag
        }
        if objective_field is not None:
            model_args.update({"objective_field":
                               fields.field_id(objective_field)})
        # If evaluate flag is on, we choose a deterministic sampling with
        # 80% of the data to create the model
        if args.evaluate:
            if args.sample_rate == 1:
                args.sample_rate = EVALUATE_SAMPLE_RATE
            seed = SEED
            model_args.update(seed=seed)

        input_fields = []
        if model_fields:
            for name in model_fields:
                input_fields.append(fields.field_id(name))
            model_args.update(input_fields=input_fields)

        if args.pruning and args.pruning != 'smart':
            model_args.update(stat_pruning=(args.pruning == 'statistical'))

        model_args.update(sample_rate=args.sample_rate,
                          replacement=args.replacement,
                          randomize=args.randomize)
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids),
                                   args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            models = model_ids
            args.number_of_models -= len(model_ids)

        model_file = open(path + '/models', 'w', 0)
        for model_id in model_ids:
            model_file.write("%s\n" % model_id)
        last_model = None
        if args.number_of_models > 0:
            message = u.dated("Creating %s.\n" %
                              u.plural("model", args.number_of_models))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            for i in range(1, args.number_of_models + 1):
                if i > args.max_parallel_models:
                    api.check_resource(last_model, api.get_model)
                model = api.create_model(dataset, model_args)
                u.log_message("%s\n" % model['resource'], log_file=log)
                last_model = model
                model_ids.append(model['resource'])
                models.append(model)
                model_file.write("%s\n" % model['resource'])
                model_file.flush()
            if args.number_of_models < 2 and args.verbosity:
                if model['object']['status']['code'] != bigml.api.FINISHED:
                    model = api.check_resource(model, api.get_model)
                    models[0] = model
                message = u.dated("Model created: %s.\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        model_file.close()

    # If a model is provided, we retrieve it.
    elif args.model:
        message = u.dated("Retrieving model. %s\n" %
                          u.get_url(args.model, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        model = api.get_model(args.model)
    elif args.models or args.model_tag:
        models = model_ids[:]

    if model_ids and test_set and not args.evaluate:
        model_id = ""
        if len(model_ids) == 1:
            model_id = model_ids[0]
        message = u.dated("Retrieving %s. %s\n" %
                          (u.plural("model", len(model_ids)),
                           u.get_url(model_id, api)))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        if len(model_ids) < args.max_batch_models:
            models = []
            for model in model_ids:
                model = api.check_resource(model, api.get_model)
                models.append(model)
            model = models[0]
        else:
            model = api.check_resource(model_ids[0], api.get_model)
            models[0] = model

    # We check that the model is finished and get the fields if we haven't
    # got them yet.
    if model and not args.evaluate and (test_set or args.black_box or
                                        args.white_box):
        if model['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving model. %s\n" %
                              u.get_url(model, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            model = api.check_resource(model, api.get_model)
        if args.black_box:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            model = api.update_model(model, {"private": False})
        if args.white_box:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            public_model = {"private": False, "white_box": True}
            if args.model_price:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(price=args.model_price)
            if args.cpp:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(credits_per_prediction=args.cpp)
            model = api.update_model(model, public_model)
        if not csv_properties:
            csv_properties = {'data_locale':
                              model['object']['locale']}
        csv_properties.update(verbose=True)
        if args.user_locale:
            csv_properties.update(data_locale=args.user_locale)

        fields = Fields(model['object']['model']['fields'],
                        **csv_properties)

    if model and not models:
        models = [model]

    if models and test_set and not args.evaluate:
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        model = api.check_resource(model_id, api.get_model)
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)

    # If evaluate flag is on, create remote evaluation and save results in
    # json and human-readable format.
    if args.evaluate:
        if resume:
            resume, evaluation = u.checkpoint(u.is_evaluation_created, path,
                                              bigml.api,
                                              debug=args.debug)
            if not resume:
                message = u.dated("Evaluation not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        if not resume:
            evaluation_file = open(path + '/evaluation', 'w', 0)
            evaluation_args = {
                "name": name,
                "description": description,
                "tags": args.tag
            }
            if not fields_map is None:
                update_map = {}
                for (dataset_column, model_column) in fields_map.iteritems():
                    update_map.update({
                        fields.field_id(dataset_column):
                        fields.field_id(model_column)})
                evaluation_args.update({"fields_map": update_map})
            if not ((args.dataset or args.test_set)
                    and (args.model or args.models or args.model_tag)):
                evaluation_args.update(out_of_bag=True, seed=SEED,
                                       sample_rate=args.sample_rate)
            message = u.dated("Creating evaluation.\n")
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            evaluation = api.create_evaluation(model, dataset,
                                               evaluation_args)
            u.log_message("%s\n" % evaluation['resource'], log_file=log)
            evaluation_file.write("%s\n" % evaluation['resource'])
            evaluation_file.flush()
            evaluation_file.close()
        message = u.dated("Retrieving evaluation. %s\n" %
                          u.get_url(evaluation, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        evaluation = api.check_resource(evaluation, api.get_evaluation)
        evaluation_json = open(output + '.json', 'w', 0)
        evaluation_json.write(json.dumps(evaluation['object']['result']))
        evaluation_json.flush()
        evaluation_json.close()
        evaluation_txt = open(output + '.txt', 'w', 0)
        api.pprint(evaluation['object']['result'], evaluation_txt)
        evaluation_txt.flush()
        evaluation_txt.close()

    # Workaround to restore windows console cp850 encoding to print the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if not data_locale[0] is None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
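# --- Usage sketch (not part of the original module) ---
# compute_output is the driver that walks the source -> dataset -> model
# chain and then predicts or evaluates. A minimal invocation sketch; the
# Namespace below lists only a plausible subset of the flags read above
# (the real one comes from the command-line parser and carries more
# attributes), and the file paths are assumptions for illustration.
def _example_compute_output():
    import argparse
    args = argparse.Namespace(
        log_file=None, clear_logs=False, source=None, dataset=None,
        model=None, models=None, model_tag=None, no_dataset=False,
        no_model=False, evaluate=False, category=12, tag=["example"],
        progress_bar=False, json_filter=None, lisp_filter=None,
        public_dataset=False, pruning='smart', sample_rate=1,
        replacement=False, randomize=False, number_of_models=1,
        max_parallel_models=1, max_batch_models=10, black_box=False,
        white_box=False, remote=False, method=0, verbosity=1, debug=False)
    compute_output(bigml.api.BigML(), args, "data/iris.csv",
                   output="predictions/predictions.csv", name="iris")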
def create_models(datasets, model_ids, model_args, args, api=None,
                  path=None, session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

            single_model = args.number_of_models == 1 and \
                existing_models == 0
            # if there's more than one model the first one must contain
            # the entire field structure to be used as reference.
            query_string = (FIELDS_QS if single_model else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                wait_for_available_tasks(inprogress,
                                         args.max_parallel_models,
                                         api.get_model, "model",
                                         query_string=query_string)
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args)
                elif args.dataset_off and args.evaluate:
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args)
                else:
                    model = api.create_model(datasets, model_args)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id,
                                      open_mode='a')

            if args.number_of_models < 2 and args.verbosity:
                if bigml.api.get_status(model)['code'] != \
                        bigml.api.FINISHED:
                    try:
                        model = check_resource(model, api.get_model,
                                               query_string=query_string)
                    except ValueError, exception:
                        sys.exit("Failed to get a finished model: %s" %
                                 str(exception))
                    models[0] = model
                message = dated("Model created: %s.\n" % get_url(model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, model)
def create_pca(datasets, pca, pca_args, args, api=None, path=None,
               session_file=None, log=None):
    """Create remote PCAs

    """
    if api is None:
        api = bigml.api.BigML()
    pcas = []
    pca_ids = []
    if pca is not None:
        pcas = [pca]
        pca_ids = [pca]
    existing_pcas = len(pcas)
    pca_args_list = []
    datasets = datasets[existing_pcas:]
    # if resuming and all pcas were created, there will
    # be no datasets left
    if datasets:
        if isinstance(pca_args, list):
            pca_args_list = pca_args
        # Only one pca per command, at present
        number_of_pcas = 1
        message = dated("Creating %s.\n" % plural("pca", number_of_pcas))
        log_message(message, log_file=session_file,
                    console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_pcas):
            wait_for_available_tasks(inprogress, args.max_parallel_pcas,
                                     api, "pca")
            if pca_args_list:
                pca_args = pca_args_list[i]
            pca = api.create_pca(datasets, pca_args, retries=None)
            pca_id = check_resource_error(pca, "Failed to create pca: ")
            log_message("%s\n" % pca_id, log_file=log)
            pca_ids.append(pca_id)
            inprogress.append(pca_id)
            pcas.append(pca)
            log_created_resources("pcas", path, pca_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(pca)['code'] != bigml.api.FINISHED:
                try:
                    pca = check_resource(pca, api.get_pca,
                                         query_string=query_string,
                                         raise_on_error=True)
                except Exception, exception:
                    sys.exit("Failed to get a finished pca: %s" %
                             str(exception))
                pcas[0] = pca
            message = dated("PCA created: %s\n" % get_url(pca))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, pca)
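# --- Usage sketch (not part of the original module) ---
# Unlike the other create_* helpers, create_pca receives a single optional
# `pca` (the resource recovered on resume) instead of a list of ids. The
# dataset id and Namespace values are assumptions for illustration.
def _example_create_pca():
    import argparse
    args = argparse.Namespace(verbosity=1, max_parallel_pcas=1,
                              reports=None)
    create_pca(["dataset/5331f7e835203f5ac30004ec"], None,
               {"name": "my PCA"}, args, api=bigml.api.BigML())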
def create_models(datasets, model_ids, model_args, args, api=None,
                  path=None, session_file=None, log=None):
    """Create remote models

    """
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_models:]
    # if resuming and all models were created, there will be no datasets left
    if datasets:
        dataset = datasets[0]
        if isinstance(model_args, list):
            model_args_list = model_args
        if args.number_of_models > 0:
            message = dated("Creating %s.\n" %
                            plural("model", args.number_of_models))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

            single_model = args.number_of_models == 1 and \
                existing_models == 0
            # if there's more than one model the first one must contain
            # the entire field structure to be used as reference.
            query_string = (FIELDS_QS if single_model and
                            (args.test_header and not args.export_fields)
                            else ALL_FIELDS_QS)
            inprogress = []
            for i in range(0, args.number_of_models):
                wait_for_available_tasks(inprogress,
                                         args.max_parallel_models,
                                         api, "model")
                if model_args_list:
                    model_args = model_args_list[i]
                if args.cross_validation_rate > 0:
                    new_seed = get_basic_seed(i + existing_models)
                    model_args.update(seed=new_seed)
                # one model per dataset (--max-categories or single model)
                if (args.max_categories > 0 or
                        (args.test_datasets and args.evaluate)):
                    dataset = datasets[i]
                    model = api.create_model(dataset, model_args,
                                             retries=None)
                elif args.dataset_off and args.evaluate:
                    multi_dataset = args.test_dataset_ids[:]
                    del multi_dataset[i + existing_models]
                    model = api.create_model(multi_dataset, model_args,
                                             retries=None)
                else:
                    model = api.create_model(datasets, model_args,
                                             retries=None)
                model_id = check_resource_error(model,
                                                "Failed to create model: ")
                log_message("%s\n" % model_id, log_file=log)
                model_ids.append(model_id)
                inprogress.append(model_id)
                models.append(model)
                log_created_resources("models", path, model_id, mode='a')

            if args.number_of_models < 2 and args.verbosity:
                if bigml.api.get_status(model)['code'] != \
                        bigml.api.FINISHED:
                    try:
                        model = check_resource(model, api.get_model,
                                               query_string=query_string,
                                               raise_on_error=True)
                    except Exception, exception:
                        sys.exit("Failed to get a finished model: %s" %
                                 str(exception))
                    models[0] = model
                message = dated("Model created: %s\n" % get_url(model))
                log_message(message, log_file=session_file,
                            console=args.verbosity)
                if args.reports:
                    report(args.reports, path, model)
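# --- Illustration (not part of the original module) ---
# The inprogress list implements client-side throttling: the id of every
# creation request that may still be running is appended, and
# wait_for_available_tasks blocks (re-checking the listed resources and
# pruning finished ones) whenever the list reaches the parallel limit.
# The control flow in isolation, with the BigML calls stubbed out:
def _example_throttling(n_tasks, max_parallel):
    inprogress = []
    for i in range(n_tasks):
        while len(inprogress) >= max_parallel:  # wait_for_available_tasks
            inprogress.pop(0)                   # pretend the oldest finished
        inprogress.append("model/%d" % i)
        print("submitted model/%d, in progress: %s" % (i, inprogress))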