def update_anomaly(anomaly, anomaly_args, args,
                   api=None, path=None, session_file=None):
    """Updates anomaly properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating anomaly detector. %s\n" % get_url(anomaly))
    log_message(message, log_file=session_file, console=args.verbosity)
    anomaly = api.update_anomaly(anomaly, anomaly_args)
    check_resource_error(anomaly, "Failed to update anomaly: %s"
                         % anomaly['resource'])
    anomaly = check_resource(anomaly, api.get_anomaly,
                             query_string=FIELDS_QS, raise_on_error=True)
    if is_shared(anomaly):
        message = dated("Shared anomaly link. %s\n" %
                        get_url(anomaly, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, anomaly)
    return anomaly

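# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of how update_anomaly might be invoked.
# The anomaly id, the Namespace attributes and the session file name are
# assumptions; only the options this helper actually reads are filled in,
# and BigML credentials are assumed to be configured in the environment.
from argparse import Namespace

example_args = Namespace(verbosity=1, reports=None)        # assumed flags
example_update = {"name": "my anomaly detector"}            # assumed payload
updated_anomaly = update_anomaly(
    "anomaly/000000000000000000000000",                     # assumed id
    example_update, example_args,
    session_file="session.log")                             # assumed file
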
def remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                    api, resume, prediction_file=None, session_file=None,
                    path=None, log=None):
    """Computes a centroid for each entry in the `test_set`.

    Predictions are computed remotely using the batch centroid call.
    """
    cluster_id = bigml.api.get_cluster_id(cluster)
    # if resuming, try to extract the batch centroid from the log files
    if resume:
        message = u.dated("Batch centroid not found. Resuming.\n")
        resume, batch_centroid = c.checkpoint(
            c.is_batch_centroid_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_centroid = create_batch_centroid(
            cluster_id, test_dataset, batch_centroid_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_centroid(batch_centroid, prediction_file)
    if args.to_dataset:
        batch_centroid = bigml.api.check_resource(batch_centroid, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_centroid['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch centroid dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_centroid_dataset", path,
                                    new_dataset, mode='a')

def update_sample(sample, sample_args, args,
                  api=None, path=None, session_file=None):
    """Updates sample properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating sample. %s\n" % get_url(sample))
    log_message(message, log_file=session_file, console=args.verbosity)
    sample = api.update_sample(sample, sample_args)
    check_resource_error(sample, "Failed to update sample: %s"
                         % sample['resource'])
    sample = check_resource(sample, api.get_sample, raise_on_error=True)
    if is_shared(sample):
        message = dated("Shared sample link. %s\n" %
                        get_url(sample, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, sample)
    return sample

def remote_anomaly_score(anomaly, test_dataset, batch_anomaly_score_args,
                         args, api, resume, prediction_file=None,
                         session_file=None, path=None, log=None):
    """Computes an anomaly score for each entry in the `test_set`.

    Predictions are computed remotely using the batch anomaly score call.
    """
    anomaly_id = bigml.api.get_anomaly_id(anomaly)
    # if resuming, try to extract the batch anomaly score from the log files
    if resume:
        message = u.dated("Batch anomaly score not found. Resuming.\n")
        resume, batch_anomaly_score = c.checkpoint(
            c.is_batch_anomaly_score_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_anomaly_score = create_batch_anomaly_score(
            anomaly_id, test_dataset, batch_anomaly_score_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_anomaly_score(batch_anomaly_score,
                                         prediction_file)
    if args.to_dataset:
        batch_anomaly_score = bigml.api.check_resource(batch_anomaly_score,
                                                       api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_anomaly_score['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch anomaly score dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_anomaly_score_dataset", path,
                                    new_dataset, open_mode='a')

def update_time_series(time_series, time_series_args, args,
                       api=None, path=None, session_file=None):
    """Updates time-series properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating time-series. %s\n" % get_url(time_series))
    log_message(message, log_file=session_file, console=args.verbosity)
    time_series = api.update_time_series(time_series, time_series_args)
    check_resource_error(time_series, "Failed to update time-series: %s"
                         % time_series['resource'])
    time_series = check_resource(time_series, api.get_time_series,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(time_series):
        message = dated("Shared time-series link. %s\n" %
                        get_url(time_series, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, time_series)
    return time_series

def remote_predict(model, test_dataset, batch_prediction_args, args,
                   api, resume, prediction_file=None, session_file=None,
                   path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

    Predictions are computed remotely using the batch predictions call.
    """
    if args.ensemble is not None:
        model_or_ensemble = args.ensemble
    else:
        model_or_ensemble = bigml.api.get_model_id(model)
    # if resuming, try to extract the batch prediction from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_or_ensemble, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_prediction(batch_prediction, prediction_file)
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction,
                                                    api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset", path,
                                    new_dataset, mode='a')

def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1, api=None, path=None,
                     session_file=None, log=None):
    """Create ensembles from input data"""
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    existing_ensembles = len(ensembles)
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if args.dataset_off and args.evaluate:
        args.test_dataset_ids = datasets[:]
    if not args.multi_label:
        datasets = datasets[existing_ensembles:]
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file, console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api, "ensemble",
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            if args.dataset_off and args.evaluate:
                multi_dataset = args.test_dataset_ids[:]
                del multi_dataset[i + existing_ensembles]
                ensemble = api.create_ensemble(multi_dataset, ensemble_args,
                                               retries=None)
            else:
                ensemble = api.create_ensemble(datasets, ensemble_args,
                                               retries=None)
            ensemble_id = check_resource_error(
                ensemble, "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id, mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s\n" % get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids

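# --- Illustration (not part of the original module) ---
# Sketch of the leave-one-out selection used above when both dataset_off and
# evaluate are set: ensemble i is built on every dataset except the i-th one
# (offset by the ensembles that already existed). The dataset ids are made up.
example_datasets = ["dataset/a", "dataset/b", "dataset/c"]
existing = 0
for i in range(len(example_datasets)):
    multi_dataset = example_datasets[:]
    del multi_dataset[i + existing]
    print(i, multi_dataset)   # e.g. 0 -> ['dataset/b', 'dataset/c']
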
def update_logistic_regression(logistic_regression, logistic_regression_args,
                               args, api=None, path=None, session_file=None):
    """Updates logistic regression properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating logistic regression. %s\n" %
                    get_url(logistic_regression))
    log_message(message, log_file=session_file, console=args.verbosity)
    logistic_regression = api.update_logistic_regression(
        logistic_regression, logistic_regression_args)
    check_resource_error(logistic_regression,
                         "Failed to update logistic regression: %s"
                         % logistic_regression['resource'])
    logistic_regression = check_resource(logistic_regression,
                                         api.get_logistic_regression,
                                         query_string=FIELDS_QS,
                                         raise_on_error=True)
    if is_shared(logistic_regression):
        message = dated("Shared logistic regression link. %s\n" %
                        get_url(logistic_regression, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, logistic_regression)
    return logistic_regression

def remote_centroid(cluster, test_dataset, batch_centroid_args, args,
                    api, resume, prediction_file=None, session_file=None,
                    path=None, log=None):
    """Computes a centroid for each entry in the `test_set`.

    Predictions are computed remotely using the batch centroid call.
    """
    cluster_id = bigml.api.get_cluster_id(cluster)
    # if resuming, try to extract the batch centroid from the log files
    if resume:
        message = u.dated("Batch centroid not found. Resuming.\n")
        resume, batch_centroid = c.checkpoint(
            c.is_batch_centroid_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_centroid = create_batch_centroid(
            cluster_id, test_dataset, batch_centroid_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_centroid(batch_centroid,
                                                prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_centroid = bigml.api.check_resource(batch_centroid, api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_centroid['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch centroid dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_centroid_dataset", path,
                                    new_dataset, mode='a')

def update_deepnet(deepnet, deepnet_args, args,
                   api=None, path=None, session_file=None):
    """Updates deepnet properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating deepnet. %s\n" % get_url(deepnet))
    log_message(message, log_file=session_file, console=args.verbosity)
    deepnet = api.update_deepnet(deepnet, deepnet_args)
    check_resource_error(deepnet, "Failed to update deepnet: %s"
                         % deepnet['resource'])
    deepnet = check_resource(deepnet, api.get_deepnet,
                             query_string=FIELDS_QS, raise_on_error=True)
    if is_shared(deepnet):
        message = dated("Shared deepnet link. %s\n" %
                        get_url(deepnet, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, deepnet)
    return deepnet

def export_dataset(dataset, api, args, resume,
                   session_file=None, path=None):
    """Exports the dataset to a CSV file given by the user or, by default,
    to a filename based on the dataset id.
    """
    filename = csv_name(args.to_csv, path, dataset)
    if resume:
        resume = c.checkpoint(
            c.is_dataset_exported, filename, debug=args.debug)
        if not resume:
            message = u.dated("No dataset exported. Resuming.\n")
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    else:
        message = u.dated("Exporting dataset to CSV file: %s\n" % filename)
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
    if not resume:
        file_name = api.download_dataset(dataset, filename=filename)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    return resume

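# --- Usage sketch (not part of the original module) ---
# A hypothetical call to export_dataset; the dataset id, file names and flags
# below are assumptions, and BigML credentials are assumed to be configured.
# With resume=False the dataset CSV is downloaded to the name derived from
# args.to_csv.
import bigml.api
from argparse import Namespace

example_api = bigml.api.BigML()                       # assumes credentials
export_args = Namespace(to_csv="iris.csv", debug=False, verbosity=1)
was_resumed = export_dataset("dataset/000000000000000000000001",
                             example_api, export_args, resume=False,
                             session_file="session.log", path="./out")
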
def update_topic_model(topic_model, topic_model_args, args,
                       api=None, path=None, session_file=None):
    """Updates topic model properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating topic model. %s\n" % get_url(topic_model))
    log_message(message, log_file=session_file, console=args.verbosity)
    topic_model = api.update_topic_model(topic_model, topic_model_args)
    check_resource_error(topic_model, "Failed to update topic model: %s"
                         % topic_model['resource'])
    topic_model = check_resource(topic_model, api.get_topic_model,
                                 query_string=FIELDS_QS,
                                 raise_on_error=True)
    if is_shared(topic_model):
        message = dated("Shared topic model link. %s\n" %
                        get_url(topic_model, shared=True))
        log_message(message, log_file=session_file, console=args.verbosity)
        if args.reports:
            report(args.reports, path, topic_model)
    return topic_model

def create_samples(datasets, sample_ids, sample_args, args,
                   api=None, path=None, session_file=None, log=None):
    """Create remote samples"""
    if api is None:
        api = bigml.api.BigML()
    samples = sample_ids[:]
    existing_samples = len(samples)
    sample_args_list = []
    datasets = datasets[existing_samples:]
    # if resuming and all samples were created, there will be no datasets left
    if datasets:
        if isinstance(sample_args, list):
            sample_args_list = sample_args
        # Only one sample per command, at present
        number_of_samples = 1
        max_parallel_samples = 1
        message = dated("Creating %s.\n" %
                        plural("sample", number_of_samples))
        log_message(message, log_file=session_file, console=args.verbosity)
        inprogress = []
        for i in range(0, number_of_samples):
            wait_for_available_tasks(inprogress, max_parallel_samples,
                                     api, "sample")
            if sample_args_list:
                sample_args = sample_args_list[i]
            sample = api.create_sample(datasets[i], sample_args,
                                       retries=None)
            sample_id = check_resource_error(sample,
                                             "Failed to create sample: ")
            log_message("%s\n" % sample_id, log_file=log)
            sample_ids.append(sample_id)
            inprogress.append(sample_id)
            samples.append(sample)
            log_created_resources("samples", path, sample_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(sample)['code'] != bigml.api.FINISHED:
                try:
                    sample = check_resource(sample, api.get_sample,
                                            raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished sample: %s"
                             % str(exception))
                samples[0] = sample
            message = dated("Sample created: %s\n" % get_url(sample))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, sample)

def create_models(dataset, model_ids, model_args, args,
                  api=None, path=None, session_file=None, log=None):
    """Create remote models"""
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    model_args_list = []
    if isinstance(model_args, list):
        model_args_list = model_args
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file, console=args.verbosity)
        single_model = args.number_of_models == 1 and existing_models == 0
        # if there's more than one model the first one must contain
        # the entire field structure to be used as reference.
        query_string = (FIELDS_QS if single_model else ALL_FIELDS_QS)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model,
                        query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
            if model_args_list:
                model_args = model_args_list[i]
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            model_id = check_resource_error(model,
                                            "Failed to create model: ")
            log_message("%s\n" % model_id, log_file=log)
            model_ids.append(model_id)
            models.append(model)
            log_created_resources("models", path, model_id, open_mode='a')
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=query_string)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

def create_models(dataset, model_ids, model_args, args,
                  api=None, path=None, session_file=None, log=None):
    """Create remote models"""
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file, console=args.verbosity)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(models[i - 1],
                                                   api.get_model,
                                                   query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models", path,
                                  bigml.api.get_model_id(model),
                                  open_mode='a')
            check_resource_error(model, "Failed to create model %s:"
                                 % model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

def remote_anomaly_score(anomaly, test_dataset, batch_anomaly_score_args,
                         args, api, resume, prediction_file=None,
                         session_file=None, path=None, log=None):
    """Computes an anomaly score for each entry in the `test_set`.

    Predictions are computed remotely using the batch anomaly score call.
    """
    anomaly_id = bigml.api.get_anomaly_id(anomaly)
    # if resuming, try to extract the batch anomaly score from the log files
    if resume:
        message = u.dated("Batch anomaly score not found. Resuming.\n")
        resume, batch_anomaly_score = c.checkpoint(
            c.is_batch_anomaly_score_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_anomaly_score = create_batch_anomaly_score(
            anomaly_id, test_dataset, batch_anomaly_score_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_anomaly_score(batch_anomaly_score,
                                                     prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_anomaly_score = bigml.api.check_resource(batch_anomaly_score,
                                                       api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_anomaly_score['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch anomaly score dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_anomaly_score_dataset", path,
                                    new_dataset, mode='a')

def create_clusters(datasets, cluster_ids, cluster_args, args,
                    api=None, path=None, session_file=None, log=None):
    """Create remote clusters"""
    if api is None:
        api = bigml.api.BigML()
    clusters = cluster_ids[:]
    existing_clusters = len(clusters)
    cluster_args_list = []
    datasets = datasets[existing_clusters:]
    # if resuming and all clusters were created, there will be no
    # datasets left
    if datasets:
        if isinstance(cluster_args, list):
            cluster_args_list = cluster_args
        # Only one cluster per command, at present
        number_of_clusters = 1
        message = dated("Creating %s.\n" %
                        plural("cluster", number_of_clusters))
        log_message(message, log_file=session_file, console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        for i in range(0, number_of_clusters):
            wait_for_available_tasks(inprogress, args.max_parallel_clusters,
                                     api, "cluster")
            if cluster_args_list:
                cluster_args = cluster_args_list[i]
            cluster = api.create_cluster(datasets, cluster_args,
                                         retries=None)
            cluster_id = check_resource_error(cluster,
                                              "Failed to create cluster: ")
            log_message("%s\n" % cluster_id, log_file=log)
            cluster_ids.append(cluster_id)
            inprogress.append(cluster_id)
            clusters.append(cluster)
            log_created_resources("clusters", path, cluster_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(cluster)['code'] != bigml.api.FINISHED:
                try:
                    cluster = check_resource(cluster, api.get_cluster,
                                             query_string=query_string,
                                             raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished cluster: %s"
                             % str(exception))
                clusters[0] = cluster
            message = dated("Cluster created: %s\n" % get_url(cluster))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, cluster)

def remote_prediction(model, test_dataset, batch_prediction_args, args,
                      api, resume, prediction_file=None, session_file=None,
                      path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

    Predictions are computed remotely using the batch prediction call.
    """
    model_id = bigml.api.get_resource_id(model)
    batch_prediction_args.update({"probability": True,
                                  "confidence": False})
    # if resuming, try to extract the batch prediction from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_id, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        file_name = api.download_batch_prediction(batch_prediction,
                                                  prediction_file)
        if file_name is None:
            sys.exit("Failed downloading CSV.")
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction,
                                                    api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset", path,
                                    new_dataset, mode='a')

def create_fusion(models, fusion, fusion_args, args, api=None,
                  path=None, session_file=None, log=None):
    """Create remote fusion"""
    if api is None:
        api = bigml.api.BigML()
    fusions = []
    fusion_ids = []
    if fusion is not None:
        fusions = [fusion]
        fusion_ids = [fusion]
    # if resuming and all fusions were created
    if models:
        # Only one fusion per command, at present
        message = dated("Creating fusion.\n")
        log_message(message, log_file=session_file, console=args.verbosity)
        query_string = FIELDS_QS
        inprogress = []
        wait_for_available_tasks(inprogress, args.max_parallel_fusions,
                                 api, "fusion")
        fusion = api.create_fusion(models, fusion_args, retries=None)
        fusion_id = check_resource_error(fusion,
                                         "Failed to create fusion: ")
        log_message("%s\n" % fusion_id, log_file=log)
        fusion_ids.append(fusion_id)
        inprogress.append(fusion_id)
        fusions.append(fusion)
        log_created_resources("fusions", path, fusion_id, mode='a')
        if args.verbosity:
            if bigml.api.get_status(fusion)['code'] != bigml.api.FINISHED:
                try:
                    fusion = check_resource(fusion, api.get_fusion,
                                            query_string=query_string,
                                            raise_on_error=True)
                except Exception as exception:
                    sys.exit("Failed to get a finished fusion: %s"
                             % str(exception))
                fusions[0] = fusion
            message = dated("Fusion created: %s\n" % get_url(fusion))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, fusion)

def create_ensembles(datasets, ensemble_ids, ensemble_args, args,
                     number_of_ensembles=1, api=None, path=None,
                     session_file=None, log=None):
    """Create ensembles from input data"""
    if api is None:
        api = bigml.api.BigML()
    ensembles = ensemble_ids[:]
    model_ids = []
    ensemble_args_list = []
    if isinstance(ensemble_args, list):
        ensemble_args_list = ensemble_args
    if number_of_ensembles > 0:
        message = dated("Creating %s.\n" %
                        plural("ensemble", number_of_ensembles))
        log_message(message, log_file=session_file, console=args.verbosity)
        query_string = ALL_FIELDS_QS
        inprogress = []
        for i in range(0, number_of_ensembles):
            wait_for_available_tasks(inprogress,
                                     args.max_parallel_ensembles,
                                     api.get_ensemble, "ensemble",
                                     query_string=query_string,
                                     wait_step=args.number_of_models)
            if ensemble_args_list:
                ensemble_args = ensemble_args_list[i]
            ensemble = api.create_ensemble(datasets, ensemble_args)
            ensemble_id = check_resource_error(
                ensemble, "Failed to create ensemble: ")
            log_message("%s\n" % ensemble_id, log_file=log)
            ensemble_ids.append(ensemble_id)
            inprogress.append(ensemble_id)
            ensembles.append(ensemble)
            log_created_resources("ensembles", path, ensemble_id,
                                  open_mode='a')
        models, model_ids = retrieve_ensembles_models(ensembles, api, path)
        if number_of_ensembles < 2 and args.verbosity:
            message = dated("Ensemble created: %s.\n" % get_url(ensemble))
            log_message(message, log_file=session_file,
                        console=args.verbosity)
            if args.reports:
                report(args.reports, path, ensemble)
    return ensembles, ensemble_ids, models, model_ids

def library_processing(api, args,
                       session_file=None, path=None, log=None):
    """Creating or retrieving a library"""
    library = None
    resume = args.resume
    if args.code_file or args.code:
        # If resuming, try to extract args.library from the log files
        if resume:
            message = u.dated("Library not found. Resuming.\n")
            resume, library = c.checkpoint(c.is_library_created, path,
                                           debug=args.debug,
                                           message=message,
                                           log_file=session_file,
                                           console=args.verbosity)
        if not resume:
            args.resume = resume
            if args.code_file:
                try:
                    with open(args.code_file) as code_file:
                        source_code = code_file.read()
                except IOError:
                    sys.exit("Failed to find the source code file: %s" %
                             args.code_file)
            else:
                source_code = args.code
            # Check if there's a created project for it
            args.project_id = pp.project_processing(
                api, args, resume, session_file=session_file, path=path,
                log=log)
            # Check if we are upgrading
            if args.upgrade:
                library = u.get_last_resource("library", api,
                                              build_query_string(args))
                log_created_resources("library", path, library, mode='a')
                message = u.dated("Library found: %s \n"
                                  " (library ID: %s)\n" %
                                  (args.name, library))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            if library is None:
                library_args = rl.set_library_args(args)
                add_version_tag(library_args, args.name)
                library = rl.create_library(source_code, library_args,
                                            args, api, path, session_file,
                                            log)
    return library

def remote_predict(model, test_dataset, batch_prediction_args, args,
                   api, resume, prediction_file=None, session_file=None,
                   path=None, log=None):
    """Computes a prediction for each entry in the `test_set`.

    Predictions are computed remotely using the batch predictions call.
    """
    if args.ensemble is not None:
        model_or_ensemble = args.ensemble
    else:
        model_or_ensemble = bigml.api.get_model_id(model)
    # if resuming, try to extract the batch prediction from the log files
    if resume:
        message = u.dated("Batch prediction not found. Resuming.\n")
        resume, batch_prediction = c.checkpoint(
            c.is_batch_prediction_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        batch_prediction = create_batch_prediction(
            model_or_ensemble, test_dataset, batch_prediction_args,
            args, api, session_file=session_file, path=path, log=log)
    if not args.no_csv:
        api.download_batch_prediction(batch_prediction, prediction_file)
    if args.to_dataset:
        batch_prediction = bigml.api.check_resource(batch_prediction,
                                                    api=api)
        new_dataset = bigml.api.get_dataset_id(
            batch_prediction['object']['output_dataset_resource'])
        if new_dataset is not None:
            message = u.dated("Batch prediction dataset created: %s\n"
                              % u.get_url(new_dataset))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            u.log_created_resources("batch_prediction_dataset", path,
                                    new_dataset, mode='a')

def delete_resources(command_args, api):
    """Deletes the resources selected by the user given options"""
    if command_args.predictions is None:
        path = a.NOW
    else:
        path = u.check_dir(command_args.predictions)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    message = u.dated("Retrieving objects to delete.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    delete_list = []
    if command_args.delete_list:
        delete_list = map(str.strip, command_args.delete_list.split(','))
    if command_args.delete_file:
        if not os.path.exists(command_args.delete_file):
            sys.exit("File %s not found" % command_args.delete_file)
        delete_list.extend([line for line
                            in open(command_args.delete_file, "r")])
    resource_selectors = [
        (command_args.source_tag, api.list_sources),
        (command_args.dataset_tag, api.list_datasets),
        (command_args.model_tag, api.list_models),
        (command_args.prediction_tag, api.list_predictions),
        (command_args.evaluation_tag, api.list_evaluations),
        (command_args.ensemble_tag, api.list_ensembles),
        (command_args.batch_prediction_tag, api.list_batch_predictions)]
    for selector, api_call in resource_selectors:
        query_string = None
        if command_args.all_tag:
            query_string = "tags__in=%s" % command_args.all_tag
        elif selector:
            query_string = "tags__in=%s" % selector
        if query_string:
            delete_list.extend(u.list_ids(api_call, query_string))
    message = u.dated("Deleting objects.\n")
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)
    message = "\n".join(delete_list)
    u.log_message(message, log_file=session_file)
    u.delete(api, delete_list)
    if sys.platform == "win32" and sys.stdout.isatty():
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file,
                  console=command_args.verbosity)

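# --- Illustration (not part of the original module) ---
# Sketch of how delete_resources builds the tag filter for each resource
# type: --all-tag wins over the per-resource tag, and the resulting query
# string is handed to the matching list call. The tag values are made up.
example_all_tag = None
example_model_tag = "experiment-42"
example_query = None
if example_all_tag:
    example_query = "tags__in=%s" % example_all_tag
elif example_model_tag:
    example_query = "tags__in=%s" % example_model_tag
print(example_query)   # -> tags__in=experiment-42
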
def create_models(dataset, model_ids, model_args, args,
                  api=None, path=None, session_file=None, log=None):
    """Create remote models"""
    if api is None:
        api = bigml.api.BigML()
    models = model_ids[:]
    existing_models = len(models)
    last_model = None
    if args.number_of_models > 0:
        message = dated("Creating %s.\n" %
                        plural("model", args.number_of_models))
        log_message(message, log_file=session_file, console=args.verbosity)
        for i in range(0, args.number_of_models):
            if i % args.max_parallel_models == 0 and i > 0:
                try:
                    models[i - 1] = check_resource(
                        models[i - 1], api.get_model,
                        query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
            if args.cross_validation_rate > 0:
                new_seed = get_basic_seed(i + existing_models)
                model_args.update(seed=new_seed)
            model = api.create_model(dataset, model_args)
            log_message("%s\n" % model['resource'], log_file=log)
            model_ids.append(model['resource'])
            models.append(model)
            log_created_resources("models", path,
                                  bigml.api.get_model_id(model),
                                  open_mode='a')
            check_resource_error(model, "Failed to create model %s:"
                                 % model['resource'])
        if args.number_of_models < 2 and args.verbosity:
            if bigml.api.get_status(model)['code'] != bigml.api.FINISHED:
                try:
                    model = check_resource(model, api.get_model,
                                           query_string=FIELDS_QS)
                except ValueError as exception:
                    sys.exit("Failed to get a finished model: %s"
                             % str(exception))
                models[0] = model
            message = dated("Model created: %s.\n" % get_url(model))
            log_message(message, log_file=session_file,
                        console=args.verbosity)

def library_processing(api, args,
                       session_file=None, path=None, log=None):
    """Creating or retrieving a library"""
    library = None
    resume = args.resume
    if args.code_file or args.code:
        # If resuming, try to extract args.library from the log files
        if resume:
            message = u.dated("Library not found. Resuming.\n")
            resume, library = c.checkpoint(
                c.is_library_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
        if not resume:
            args.resume = resume
            if args.code_file:
                try:
                    with open(args.code_file) as code_file:
                        source_code = code_file.read()
                except IOError:
                    sys.exit("Failed to find the source code file: %s" %
                             args.code_file)
            else:
                source_code = args.code
            # Check if there's a created project for it
            args.project_id = pp.project_processing(
                api, args, resume, session_file=session_file, path=path,
                log=log)
            # Check if we are upgrading
            if args.upgrade:
                library = u.get_last_resource("library", api,
                                              build_query_string(args))
                r.log_created_resources("library", path, library, mode='a')
                message = u.dated("Library found: %s \n"
                                  " (library ID: %s)\n" %
                                  (args.name, library))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            if library is None:
                library_args = r.set_library_args(args)
                add_version_tag(library_args, args.name)
                library = r.create_library(source_code, library_args, args,
                                           api, path, session_file, log)
    return library

def update_project(project_args, args, api=None,
                   session_file=None, log=None):
    """Updates project properties"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Updating project attributes.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    project = api.update_project(args.project_id, project_args)
    check_resource_error(project, "Failed to update project: %s"
                         % project['resource'])
    message = dated("Project \"%s\" has been updated.\n"
                    % project['resource'])
    log_message(message, log_file=session_file, console=args.verbosity)
    log_message("%s\n" % args.project_id, log_file=log)
    return project

def prediction(models, fields, args, session_file=None):
    """Computes a supervised model prediction for each entry in the
    `test_set`.
    """
    test_set = args.test_set
    test_set_header = args.test_header
    output = args.predictions
    test_reader = TestReader(test_set, test_set_header, fields, None,
                             test_separator=args.test_separator)
    with UnicodeWriter(output, lineterminator="\n") as output:
        # columns to exclude if input_data is added to the prediction field
        exclude = use_prediction_headers(
            args.prediction_header, output, test_reader, fields, args,
            args.objective_field, quality="probability")
        # Local predictions: Predictions are computed locally
        message = u.dated("Creating local predictions.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        local_prediction(models, test_reader, output, args, exclude=exclude)
    test_reader.close()

def create_execution(execution_args, args, api=None, path=None,
                     session_file=None, log=None):
    """Creates remote execution"""
    message = dated("Creating execution.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    scripts = args.script_ids if args.script_ids else args.script
    execution = api.create_execution(scripts, execution_args)
    log_created_resources("execution", path,
                          bigml.api.get_execution_id(execution), mode='a')
    execution_id = check_resource_error(execution,
                                        "Failed to create execution: ")
    try:
        execution = check_resource(execution, api.get_execution,
                                   raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished execution: %s" % str(exception))

def pca_processing(datasets, pca, pca_ids, api, args, resume,
                   fields=None, session_file=None, path=None, log=None):
    """Creates or retrieves a pca from the input data"""
    # If we have a dataset but not a pca, we create the pca unless the
    # no_pca flag has been set.
    if datasets and not (has_pca(args) or args.no_pca):
        pca_ids = []
        pcas = []
        # Only 1 pca per bigmler command at present
        number_of_pcas = 1
        if resume:
            resume, pca_ids = c.checkpoint(
                c.are_pcas_created, path, number_of_pcas,
                debug=args.debug)
            if not resume:
                message = u.dated("Found %s pcas out of %s."
                                  " Resuming.\n" % (len(pca_ids),
                                                    number_of_pcas))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            pcas = pca_ids
            number_of_pcas -= len(pca_ids)
        args.exclude_fields = []
        if args.exclude_objective:
            dataset = datasets[0]
            fields = Fields(dataset)
            objective_id = \
                fields.fields_by_column_number[fields.objective_field]
            args.exclude_fields = [objective_id]
        pca_args = r.set_pca_args(args, fields=fields,
                                  pca_fields=args.pca_fields_)
        pca = r.create_pca(datasets, pca, pca_args, args, api,
                           path, session_file, log)
    # If a pca is provided, we use it.
    elif args.pca:
        pca_ids = [args.pca]
        pca = pca_ids[0]
    elif args.pca or args.pca_tag:
        pca = pca_ids[0]
    # If we are going to create projections, we must retrieve the pca
    if pca_ids and (args.test_set or args.export_fields):
        pca = r.get_pca(pca, args, api, session_file)
    return pca, resume

def ensemble_processing(datasets, api, args, resume,
                        fields=None, session_file=None, path=None,
                        log=None):
    """Creates an ensemble of models from the input data"""
    ensembles = []
    ensemble_ids = []
    number_of_ensembles = len(datasets)
    if resume:
        resume, ensemble_ids = c.checkpoint(
            c.are_ensembles_created, path, number_of_ensembles,
            debug=args.debug)
        if not resume:
            message = u.dated("Found %s ensembles out of %s. Resuming.\n"
                              % (len(ensemble_ids), number_of_ensembles))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
        ensembles = ensemble_ids
        number_of_ensembles -= len(ensemble_ids)
    if number_of_ensembles > 0:
        ensemble_args = r.set_ensemble_args(args, fields=fields)
        ensembles, ensemble_ids, models, model_ids = r.create_ensembles(
            datasets, ensembles, ensemble_args, args, api=api, path=path,
            number_of_ensembles=number_of_ensembles,
            session_file=session_file, log=log)
    return ensembles, ensemble_ids, models, model_ids, resume

def evaluate(model, dataset, name, description, fields, fields_map,
             output, api, args, resume,
             session_file=None, path=None, log=None):
    """Evaluates a model or an ensemble with the given dataset"""
    if resume:
        message = u.dated("Evaluation not found. Resuming.\n")
        resume, evaluation = c.checkpoint(
            c.is_evaluation_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(name, description, args,
                                                fields, fields_map)
        if args.ensemble:
            model_or_ensemble = args.ensemble
        else:
            model_or_ensemble = model
        evaluation = r.create_evaluation(model_or_ensemble, dataset,
                                         evaluation_args, args, api,
                                         path, session_file, log)
    evaluation = r.get_evaluation(evaluation, api, args.verbosity,
                                  session_file)
    r.save_evaluation(evaluation, output, api)
    return resume

def execution_processing(api, args,
                         session_file=None, path=None, log=None):
    """Creating or retrieving an execution"""
    execution = None
    resume = args.resume
    if args.script or args.scripts:
        # If resuming, try to extract args.execution from the log files
        if resume:
            message = u.dated("Execution not found. Resuming.\n")
            resume, args.execution = c.checkpoint(
                c.is_execution_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
        if not resume:
            args.resume = resume
            # Check if there's a created project for it
            args.project_id = pp.project_processing(
                api, args, resume, session_file=session_file, path=path,
                log=log)
            execution_args = r.set_execution_args(args)
            execution = r.create_execution(execution_args, args, api,
                                           path, session_file, log)
    # If an execution is provided either through the command line or in
    # resume steps, we use it.
    elif args.execution:
        execution = bigml.api.get_execution_id(args.execution)
    return execution

def test_source_processing(api, args, resume,
                           name=None, csv_properties=None,
                           session_file=None, path=None, log=None):
    """Creating or retrieving a test data source from input arguments"""
    test_source = None
    fields = None
    if csv_properties is None:
        csv_properties = {}
    if args.test_set and args.remote:
        # If resuming, try to extract args.test_source from the log files
        if resume:
            message = u.dated("Test source not found. Resuming.\n")
            resume, args.test_source = c.checkpoint(
                c.is_source_created, path, suffix="_test",
                debug=args.debug, message=message, log_file=session_file,
                console=args.verbosity)
        if not resume:
            source_args = r.set_source_args(
                args, name=name, data_set_header=args.test_header)
            test_source = r.create_source(args.test_set, source_args, args,
                                          api, path, session_file, log,
                                          source_type="test")
    # If a source is provided either through the command line or in resume
    # steps, we use it.
    elif args.test_source:
        test_source = bigml.api.get_source_id(args.test_source)
    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if test_source:
        test_source = r.get_source(test_source, api, args.verbosity,
                                   session_file)
        if 'source_parser' in test_source['object']:
            source_parser = test_source['object']['source_parser']
            if 'missing_tokens' in source_parser:
                csv_properties['missing_tokens'] = (
                    source_parser['missing_tokens'])
            if 'locale' in source_parser:
                csv_properties['data_locale'] = source_parser['locale']
                if (args.user_locale is not None and
                        bigml_locale(args.user_locale) ==
                        source_parser['locale']):
                    args.user_locale = None
        fields = Fields(test_source['object']['fields'], **csv_properties)
        if (args.field_attributes_ or args.types_ or args.user_locale
                or args.json_args.get('source')):
            # avoid updating project_id in source
            project_id, args.project_id = args.project_id, None
            test_source_args = r.set_source_args(args, fields=fields)
            test_source = r.update_source(test_source, test_source_args,
                                          args, api, session_file)
            args.project_id = project_id
            fields = Fields(test_source['object']['fields'],
                            **csv_properties)
    return test_source, resume, csv_properties, fields

def alternative_dataset_processing(dataset_or_source, suffix, dataset_args,
                                   api, args, resume,
                                   session_file=None, path=None, log=None):
    """Creates a dataset. Used in splits to generate train and test
    datasets.
    """
    alternative_dataset = None
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, alternative_dataset = c.checkpoint(
            c.is_dataset_created, path, "_%s" % suffix, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if alternative_dataset is None:
        alternative_dataset = r.create_dataset(
            dataset_or_source, dataset_args, args, api, path, session_file,
            log, suffix)
        if alternative_dataset:
            alternative_dataset = r.get_dataset(
                alternative_dataset, api, args.verbosity, session_file)
    return alternative_dataset, resume

def create_library(source_code, library_args, args, api=None,
                   path=None, session_file=None, log=None):
    """Creates remote library"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating library \"%s\".\n" % library_args["name"])
    log_message(message, log_file=session_file, console=args.verbosity)
    library = api.create_library(source_code, library_args)
    log_created_resources("library", path,
                          bigml.api.get_library_id(library), mode='a')
    library_id = check_resource_error(library,
                                      "Failed to create library: ")
    try:
        library = check_resource(library, api.get_library,
                                 raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a compiled library: %s" % str(exception))

def project_processing(api, args, resume, session_file=None, path=None,
                       log=None, create=False):
    """Creating or retrieving a project from input arguments"""
    # if no project info is given by the user, we skip project processing
    # and no project will be assigned
    if args.project is None and args.project_id is None:
        return None
    project_id = None
    if args.project:
        # If resuming, try to extract args.project_id from the log files
        if resume:
            message = u.dated("Project not found. Resuming.\n")
            resume, project_id = c.checkpoint(
                c.is_project_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
        elif not create:
            project_id = r.get_project_by_name(
                args.project, api=api, verbosity=args.verbosity,
                session_file=session_file)
    elif args.project_id:
        project_id = bigml.api.get_project_id(args.project_id)
    # If no project is found by that name, we create a new one.
    if project_id is None:
        project_args = r.set_project_args(args, name=args.project)
        project = r.create_project(
            project_args, args, api, session_file, path, log)
        project_id = project['resource']
    return project_id

def ensemble_processing(dataset, objective_field, fields, api, args, resume,
                        name=None, description=None, model_fields=None,
                        session_file=None, path=None, log=None):
    """Creates an ensemble of models from the input data"""
    ensembles = []
    number_of_ensembles = 1
    if resume:
        message = u.dated("Ensemble not found. Resuming.\n")
        resume, ensembles = c.checkpoint(
            c.are_ensembles_created, path, number_of_ensembles,
            debug=args.debug, message=message, log_file=session_file,
            console=args.verbosity)
    try:
        ensemble = ensembles[0]
    except IndexError:
        ensemble = None
    if ensemble is None:
        ensemble_args = r.set_ensemble_args(name, description, args,
                                            model_fields, objective_field,
                                            fields)
        ensembles, ensemble_ids, models, model_ids = r.create_ensembles(
            dataset, ensembles, ensemble_args, args, api=api, path=path,
            session_file=session_file, log=log)
    return ensembles, ensemble_ids, models, model_ids, resume

def evaluations_process(time_series_set, datasets,
                        fields, dataset_fields, api, args, resume,
                        session_file=None, path=None, log=None,
                        objective_field=None):
    """Evaluates time-series against datasets"""
    existing_evaluations = 0
    evaluations = []
    number_of_evaluations = len(time_series_set)
    if resume:
        resume, evaluations = c.checkpoint(c.are_evaluations_created, path,
                                           number_of_evaluations,
                                           debug=args.debug)
        if not resume:
            existing_evaluations = len(evaluations)
            message = u.dated("Found %s evaluations from %s. Resuming.\n"
                              % (existing_evaluations,
                                 number_of_evaluations))
            number_of_evaluations -= existing_evaluations
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
    if not resume:
        evaluation_args = r.set_evaluation_args(args, fields,
                                                dataset_fields)
        evaluations.extend(r.create_evaluations(
            time_series_set, datasets, evaluation_args,
            args, api, path=path, session_file=session_file,
            log=log, existing_evaluations=existing_evaluations))
    return evaluations, resume

def source_processing(training_set, test_set,
                      training_set_header, test_set_header,
                      api, args, resume,
                      name=None, description=None, csv_properties=None,
                      field_attributes=None, types=None,
                      session_file=None, path=None, log=None):
    """Creating or retrieving a data source from input arguments"""
    source = None
    fields = None
    if csv_properties is None:
        csv_properties = {}
    if (training_set or (args.evaluate and test_set)):
        # If resuming, try to extract args.source from the log files
        if resume:
            message = u.dated("Source not found. Resuming.\n")
            resume, args.source = c.checkpoint(
                c.is_source_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
        # If neither a previous source, dataset nor model is provided,
        # we create a new one. Also, if --evaluate and test data are
        # provided, we create a new dataset to test with.
        data_set, data_set_header = r.data_to_source(training_set, test_set,
                                                     training_set_header,
                                                     test_set_header, args)
        if data_set is not None:
            source_args = r.set_source_args(data_set_header, name,
                                            description, args)
            source = r.create_source(data_set, source_args, args, api,
                                     path, session_file, log)
    # If a source is provided either through the command line or in resume
    # steps, we use it.
    elif args.source:
        source = bigml.api.get_source_id(args.source)
    # If we already have a source, we check that it is finished, extract the
    # fields, and update them if needed.
    if source:
        source = r.get_source(source, api, args.verbosity, session_file)
        if 'source_parser' in source['object']:
            source_parser = source['object']['source_parser']
            if 'missing_tokens' in source_parser:
                csv_properties['missing_tokens'] = (
                    source_parser['missing_tokens'])
            if 'locale' in source_parser:
                csv_properties['data_locale'] = source_parser['locale']
        fields = Fields(source['object']['fields'], **csv_properties)
        if field_attributes:
            source = r.update_source_fields(source, field_attributes,
                                            fields, api, args.verbosity,
                                            session_file)
        if types:
            source = r.update_source_fields(source, types, fields, api,
                                            args.verbosity, session_file)
        if field_attributes or types:
            fields = Fields(source['object']['fields'], **csv_properties)
    return source, resume, csv_properties, fields

def library_processing(api, args,
                       session_file=None, path=None, log=None):
    """Creating or retrieving a library"""
    library = None
    resume = args.resume
    if args.code_file or args.code:
        # If resuming, try to extract args.library from the log files
        if resume:
            message = u.dated("Library not found. Resuming.\n")
            resume, library = c.checkpoint(
                c.is_library_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
        if not resume:
            args.resume = resume
            if args.code_file:
                try:
                    with open(args.code_file) as code_file:
                        source_code = code_file.read()
                except IOError:
                    sys.exit("Failed to find the source code file: %s" %
                             args.code_file)
            else:
                source_code = args.code
            # Check if there's a created project for it
            args.project_id = pp.project_processing(
                api, args, resume, session_file=session_file, path=path,
                log=log)
            library_args = r.set_library_args(args)
            library = r.create_library(source_code, library_args, args,
                                       api, path, session_file, log)
    return library

def update_project(args, api, resume,
                   session_file=None, path=None, log=None):
    """Updating project attributes according to input arguments"""
    # if no project info is given by the user, we skip project processing
    # and no project will be assigned
    if args.project_id is None:
        return None
    # If resuming, try to extract args.project_id from the log files
    if resume:
        message = u.dated("Project not found. Resuming.\n")
        resume, project_id = c.checkpoint(
            c.is_project_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    elif args.project_id:
        project_id = bigml.api.get_project_id(args.project_id)
    if project_id is not None:
        project_args = r.set_project_args(args, name=args.project)
        project = r.update_project(
            project_args, args, api, session_file, log)
        project_id = project['resource']
    return project_id

def alternative_dataset_processing(dataset_or_source, suffix, dataset_args,
                                   api, args, resume,
                                   session_file=None, path=None, log=None):
    """Creates a dataset. Used in splits to generate train and test
    datasets.
    """
    alternative_dataset = None
    # if resuming, try to extract the dataset from the log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, alternative_dataset = c.checkpoint(
            c.is_dataset_created, path, "_%s" % suffix, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if alternative_dataset is None:
        alternative_dataset = r.create_dataset(
            dataset_or_source, dataset_args, args, api, path, session_file,
            log, suffix)
        if alternative_dataset:
            alternative_dataset = r.get_dataset(
                alternative_dataset, api, args.verbosity, session_file)
    return alternative_dataset, resume

def create_evaluation(model_or_ensemble, dataset, evaluation_args,
                      args, api=None, path=None, session_file=None,
                      log=None, seed=SEED):
    """Create evaluation

    ``model_or_ensemble``: resource object or id for the model or ensemble
                           that should be evaluated
    ``dataset``: dataset object or id to evaluate with
    ``evaluation_args``: arguments for the ``create_evaluation`` call
    ``args``: input values for bigmler flags
    ``api``: api to remote objects in BigML
    ``path``: directory to store the BigMLer generated files in
    ``session_file``: file to store the messages of that session
    ``log``: user provided log file
    ``seed``: seed for the dataset sampling (when needed)
    """
    if api is None:
        api = bigml.api.BigML()
    if args.cross_validation_rate > 0:
        evaluation_args.update(seed=seed)
    message = dated("Creating evaluation.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    evaluation = api.create_evaluation(model_or_ensemble, dataset,
                                       evaluation_args)
    log_created_resources("evaluation", path,
                          bigml.api.get_evaluation_id(evaluation))
    check_resource_error(evaluation, "Failed to create evaluation: ")
    log_message("%s\n" % evaluation['resource'], log_file=log)
    return evaluation

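# --- Usage sketch (not part of the original module) ---
# A hypothetical call to create_evaluation; the resource ids, flags and the
# payload are assumptions, and BigML credentials are assumed to be
# configured. When cross_validation_rate is positive, the sampling seed is
# injected into the evaluation arguments so every fold uses the same split.
from argparse import Namespace

eval_flags = Namespace(cross_validation_rate=0.1, verbosity=1)
eval_payload = {"sample_rate": 0.8, "out_of_bag": True}     # assumed payload
example_evaluation = create_evaluation(
    "model/000000000000000000000002",                        # assumed id
    "dataset/000000000000000000000003",                      # assumed id
    eval_payload, eval_flags,
    path="./out", session_file="session.log")
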
def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in their actual status"""
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    if len(model_ids) == 1:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                model = check_resource(model, api.get_model,
                                       query_string=FIELDS_QS)
            except ValueError as exception:
                sys.exit("Failed to get a finished model: %s"
                         % str(exception))
            models.append(model)
        model = models[0]

def ensemble_processing(dataset, name, description, objective_field, fields,
                        api, args, resume,
                        session_file=None, path=None, log=None):
    """Creates an ensemble of models from the input data"""
    ensemble = None
    if resume:
        message = u.dated("Ensemble not found. Resuming.\n")
        resume, ensemble = c.checkpoint(c.is_ensemble_created, path,
                                        debug=args.debug, message=message,
                                        log_file=session_file,
                                        console=args.verbosity)
    if ensemble is None:
        ensemble_args = r.set_ensemble_args(name, description, args,
                                            objective_field, fields)
        ensemble = r.create_ensemble(dataset, ensemble_args, args, api,
                                     path, session_file, log)
    return ensemble, resume

def get_logistic_regressions(logistic_regression_ids, args,
                             api=None, session_file=None):
    """Retrieves remote logistic regressions in their actual status"""
    if api is None:
        api = bigml.api.BigML()
    logistic_regression_id = ""
    logistic_regressions = logistic_regression_ids
    logistic_regression_id = logistic_regression_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("logistic regression",
                            len(logistic_regression_ids)),
                     get_url(logistic_regression_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    # only one logistic regression to predict at present
    try:
        # we need the whole fields structure when exporting fields
        query_string = FIELDS_QS if not args.export_fields \
            else ALL_FIELDS_QS
        logistic_regression = check_resource(
            logistic_regression_ids[0], api.get_logistic_regression,
            query_string=query_string, raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished logistic regression: %s"
                 % str(exception))

def create_batch_prediction(model_or_ensemble, test_dataset,
                            batch_prediction_args, verbosity,
                            api=None, session_file=None, path=None,
                            log=None):
    """Creates remote batch_prediction"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch prediction.\n")
    log_message(message, log_file=session_file, console=verbosity)
    batch_prediction = api.create_batch_prediction(model_or_ensemble,
                                                   test_dataset,
                                                   batch_prediction_args)
    log_created_resources(
        "batch_prediction", path,
        bigml.api.get_batch_prediction_id(batch_prediction),
        open_mode='a')
    batch_prediction_id = check_resource_error(
        batch_prediction, "Failed to create batch prediction: ")
    try:
        batch_prediction = check_resource(batch_prediction,
                                          api.get_batch_prediction)
    except ValueError as exception:
        sys.exit("Failed to get a finished batch prediction: %s"
                 % str(exception))

def create_batch_prediction(model_or_ensemble, test_dataset,
                            batch_prediction_args, args,
                            api=None, session_file=None, path=None,
                            log=None):
    """Creates remote batch_prediction"""
    if api is None:
        api = bigml.api.BigML()
    message = dated("Creating batch prediction.\n")
    log_message(message, log_file=session_file, console=args.verbosity)
    batch_prediction = api.create_batch_prediction(model_or_ensemble,
                                                   test_dataset,
                                                   batch_prediction_args,
                                                   retries=None)
    log_created_resources(
        "batch_prediction", path,
        bigml.api.get_batch_prediction_id(batch_prediction),
        mode='a')
    batch_prediction_id = check_resource_error(
        batch_prediction, "Failed to create batch prediction: ")
    try:
        batch_prediction = check_resource(batch_prediction,
                                          api.get_batch_prediction,
                                          raise_on_error=True)
    except Exception as exception:
        sys.exit("Failed to get a finished batch prediction: %s"
                 % str(exception))

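# --- Usage sketch (not part of the original module) ---
# A hypothetical call to the args-based create_batch_prediction above; the
# resource ids, flags and the payload are assumptions, and BigML credentials
# are assumed to be configured. Output options such as headers would travel
# inside batch_prediction_args.
from argparse import Namespace

bp_flags = Namespace(verbosity=1)                            # assumed flags
bp_payload = {"header": True, "all_fields": True}            # assumed payload
example_batch_prediction = create_batch_prediction(
    "ensemble/000000000000000000000004",                     # assumed id
    "dataset/000000000000000000000005",                      # assumed id
    bp_payload, bp_flags,
    path="./out", session_file="session.log")
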
def model_per_label(labels, datasets, fields,
                    objective_field, api, args, resume,
                    name=None, description=None, model_fields=None,
                    multi_label_data=None,
                    session_file=None, path=None, log=None):
    """Creates a model per label for multi-label datasets"""
    model_ids = []
    models = []
    args.number_of_models = len(labels)
    if resume:
        resume, model_ids = c.checkpoint(
            c.are_models_created, path, args.number_of_models,
            debug=args.debug)
        if not resume:
            message = u.dated("Found %s models out of %s."
                              " Resuming.\n" % (len(model_ids),
                                                args.number_of_models))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
        models = model_ids
        args.number_of_models = len(labels) - len(model_ids)
    model_args_list = r.set_label_model_args(
        name, description, args, labels, multi_label_data, fields,
        model_fields, objective_field)
    # create models changing the input_field to select
    # only one label at a time
    models, model_ids = r.create_models(
        datasets, models, model_args_list, args, api,
        path, session_file, log)
    args.number_of_models = 1
    return models, model_ids, resume

def get_models(model_ids, args, api=None, session_file=None):
    """Retrieves remote models in their actual status

    """
    if api is None:
        api = bigml.api.BigML()
    model_id = ""
    models = model_ids
    single_model = len(model_ids) == 1
    if single_model:
        model_id = model_ids[0]
    message = dated("Retrieving %s. %s\n" %
                    (plural("model", len(model_ids)),
                     get_url(model_id)))
    log_message(message, log_file=session_file, console=args.verbosity)
    if len(model_ids) < args.max_batch_models:
        models = []
        for model in model_ids:
            try:
                # if there's more than one model the first one must contain
                # the entire field structure to be used as reference.
                query_string = (ALL_FIELDS_QS if not single_model and
                                (len(models) == 0 or args.multi_label)
                                else FIELDS_QS)
                model = check_resource(model, api.get_model,
                                       query_string=query_string)
            except ValueError, exception:
                sys.exit("Failed to get a finished model: %s" %
                         str(exception))
            models.append(model)
        model = models[0]
def update_external_connector(args, api, resume,
                              session_file=None, path=None, log=None):
    """Updating external connector attributes according to input arguments

    """
    # if no external connector info given by the user, we skip processing and
    # no update will be performed
    if args.external_connector_id is None:
        return None
    # if resuming, try to extract args.external_connector_id from log files
    if resume:
        message = u.dated("External connector not found. Resuming.\n")
        resume, external_connector_id = c.checkpoint(
            c.is_external_connector_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    elif args.external_connector_id:
        external_connector_id = bigml.api.get_external_connector_id(
            args.external_connector_id)
    if external_connector_id is not None:
        external_connector_args = r.set_basic_args(args, args.name)
        external_connector = r.update_external_connector(
            external_connector_args, args, api, session_file, log)
        external_connector_id = external_connector['resource']
    return external_connector_id
def update_project(args, api, resume,
                   session_file=None, path=None, log=None):
    """Updating project attributes according to input arguments

    """
    # if no project info given by the user, we skip project processing and no
    # project will be assigned
    if args.project_id is None:
        return None
    # if resuming, try to extract args.project_id from log files
    if resume:
        message = u.dated("Project not found. Resuming.\n")
        resume, project_id = c.checkpoint(c.is_project_created, path,
                                          debug=args.debug, message=message,
                                          log_file=session_file,
                                          console=args.verbosity)
    elif args.project_id:
        project_id = bigml.api.get_project_id(args.project_id)
    if project_id is not None:
        project_args = r.set_project_args(args, name=args.project)
        project = r.update_project(project_args, args, api, session_file)
        project_id = project['resource']
    return project_id
def connector_processing(api, args, resume,
                         session_file=None, path=None, log=None):
    """Creating or retrieving an external connector from input arguments

    """
    # if no external connection info given by the user, we skip
    # processing and no connector will be created
    if not u.has_connection_info(args) and args.external_connector_id is None:
        return None
    external_connector_id = None
    if u.has_connection_info(args):
        # if resuming, try to extract args.external_connector_id from
        # log files
        if resume:
            message = u.dated("External connector ID not found. Resuming.\n")
            resume, external_connector_id = c.checkpoint(
                c.is_external_connector_created, path, debug=args.debug,
                message=message, log_file=session_file,
                console=args.verbosity)
    else:
        external_connector_id = bigml.api.get_external_connector_id(
            args.external_connector_id)

    # if no external connector is found, we create a new one.
    if external_connector_id is None:
        connector_args = r.set_external_connector_args(
            args, name=args.name)
        connector = r.create_external_connector(
            connector_args, args, api, session_file, path, log)
        external_connector_id = connector['resource']

    return external_connector_id
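# Illustrative sketch (not part of the original module): a plain summary of
# the branching in `connector_processing`. Connection info on the command
# line triggers creation (or checkpoint-based resumption) of a connector, an
# explicit --external-connector-id is reused as-is, and with neither the step
# is skipped. The helper name is hypothetical.
def _example_connector_decision(args):
    """Sketch only: mirrors the decision taken by `connector_processing`."""
    if u.has_connection_info(args):
        return "create a new external connector (or resume a checkpointed one)"
    if args.external_connector_id is not None:
        return "reuse the connector given by --external-connector-id"
    return None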
def topic_distribution(topic_models, fields, args, session_file=None):
    """Computes a topic distribution for each entry in the `test_set`.

    """
    test_set = args.test_set
    test_set_header = args.test_header
    output = args.predictions
    test_reader = TestReader(test_set, test_set_header, fields,
                             None,
                             test_separator=args.test_separator)
    with UnicodeWriter(output, lineterminator="\n") as output:
        # columns to exclude if input_data is added to the prediction field
        exclude, headers = use_prediction_headers(test_reader, fields, args)

        # Local topic distributions: topic distributions are computed
        # locally using the topic models
        message = u.dated("Creating local topic distributions.\n")
        u.log_message(message, log_file=session_file, console=args.verbosity)
        local_topic_distribution(topic_models, test_reader, output, args,
                                 exclude=exclude, headers=headers)
    test_reader.close()
def remote_forecast(time_series, forecast_args, args,
                    api, resume, prediction_file=None, session_file=None,
                    path=None, log=None):
    """Computes a remote forecast.

    """
    time_series_id = bigml.api.get_time_series_id(time_series)
    # if resuming, try to extract the forecast from log files
    if resume:
        message = u.dated("Forecast not found. Resuming.\n")
        resume, forecast = c.checkpoint(
            c.is_forecast_created, path, debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)
    if not resume:
        local_time_series = TimeSeries(time_series,
                                       api=args.retrieve_api_)
        output = args.predictions
        if args.test_set is not None:
            input_data = u.read_json(args.test_set)
        elif args.horizon is not None:
            input_data = {local_time_series.objective_id: {
                "horizon": args.horizon}}
        forecast = create_forecast(
            time_series_id, input_data, forecast_args,
            args, api, session_file=session_file, path=path, log=log)
        write_forecasts(forecast["object"]["forecast"]["result"], output)
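# Illustrative sketch (not part of the original module): the --horizon branch
# above builds the forecast input by mapping the time series objective field
# id to a horizon specification. The field id and helper name below are
# placeholders.
def _example_forecast_input(objective_id="000005", horizon=10):
    """Sketch only: returns the input_data structure used when only a
    forecast horizon (and no test set) is given.
    """
    return {objective_id: {"horizon": horizon}}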
def remote_predict_models(models, test_reader, prediction_file, api, args,
                          resume=False, output_path=None,
                          session_file=None, log=None, exclude=None):
    """Retrieve predictions remotely, combine them and save predictions to
       file

    """
    predictions_files = []
    prediction_args = {
        "tags": args.tag
    }
    test_set_header = test_reader.has_headers()
    if output_path is None:
        output_path = u.check_dir(prediction_file)
    message_logged = False

    raw_input_data_list = []
    for input_data in test_reader:
        raw_input_data_list.append(input_data)
    single_model = len(models) == 1
    if single_model:
        prediction_file = UnicodeWriter(prediction_file).open_writer()
    for model in models:
        model = bigml.api.get_model_id(model)
        predictions_file = get_predictions_file_name(model, output_path)
        predictions_files.append(predictions_file)
        if (not resume or
                not c.checkpoint(c.are_predictions_created, predictions_file,
                                 test_reader.number_of_tests(),
                                 debug=args.debug)[0]):
            if not message_logged:
                message = u.dated("Creating remote predictions.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            message_logged = True
            with UnicodeWriter(predictions_file) as predictions_file:
                for input_data in raw_input_data_list:
                    input_data_dict = test_reader.dict(input_data)
                    prediction = api.create_prediction(model, input_data_dict,
                                                       by_name=test_set_header,
                                                       wait_time=0,
                                                       args=prediction_args)
                    u.check_resource_error(prediction,
                                           "Failed to create prediction: ")
                    u.log_message("%s\n" % prediction['resource'],
                                  log_file=log)
                    prediction_row = prediction_to_row(prediction)
                    predictions_file.writerow(prediction_row)
                    if single_model:
                        write_prediction(prediction_row[0:2], prediction_file,
                                         args.prediction_info, input_data,
                                         exclude)
    if single_model:
        prediction_file.close_writer()
    else:
        combine_votes(predictions_files,
                      Model(models[0]).to_prediction,
                      prediction_file, args.method,
                      args.prediction_info, raw_input_data_list, exclude)
def local_batch_predict(models, headers, test_reader, exclude, fields, resume,
                        output_path, max_models, number_of_tests, api, output,
                        verbosity, method, objective_field, session_file,
                        debug):
    """Get local predictions from partial MultiModels, combine and save to
       file

    """
    def draw_progress_bar(current, total):
        """Draws a text based progress report.

        """
        pct = 100 - ((total - current) * 100) / (total)
        console_log("Predicted on %s out of %s models [%s%%]" % (
            localize(current), localize(total), pct))

    models_total = len(models)
    models_splits = [models[index:(index + max_models)] for index
                     in range(0, models_total, max_models)]
    input_data_list = []
    for row in test_reader:
        for index in exclude:
            del row[index]
        input_data_list.append(fields.pair(row, headers, objective_field))
    total_votes = []
    models_count = 0
    for models_split in models_splits:
        if resume:
            for model in models_split:
                pred_file = get_predictions_file_name(model, output_path)
                u.checkpoint(u.are_predictions_created, pred_file,
                             number_of_tests, debug=debug)
        complete_models = []
        for index in range(len(models_split)):
            complete_models.append(api.check_resource(
                models_split[index], api.get_model))

        local_model = MultiModel(complete_models)
        local_model.batch_predict(input_data_list, output_path, reuse=True)
        votes = local_model.batch_votes(output_path)
        models_count += max_models
        if models_count > models_total:
            models_count = models_total
        if verbosity:
            draw_progress_bar(models_count, models_total)
        if total_votes:
            for index in range(0, len(votes)):
                predictions = total_votes[index].predictions
                predictions.extend(votes[index].predictions)
        else:
            total_votes = votes

    message = u.dated("Combining predictions.\n")
    u.log_message(message, log_file=session_file, console=verbosity)
    for multivote in total_votes:
        u.write_prediction(multivote.combine(method), output)
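# Illustrative sketch (not part of the original module): the slicing used in
# `local_batch_predict` groups the model list in chunks of at most
# `max_models` models, so votes can be accumulated split by split without
# holding every model in memory. The helper name is hypothetical.
def _example_model_splits(models, max_models=10):
    """Sketch only: with 25 models and max_models=10 this returns three
    splits of 10, 10 and 5 models respectively.
    """
    return [models[index:(index + max_models)]
            for index in range(0, len(models), max_models)]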
def split_processing(dataset, name, description, api, args, resume,
                     session_file=None, path=None, log=None):
    """Splits a dataset into train and test datasets

    """
    train_dataset = None
    test_dataset = None
    sample_rate = 1 - args.test_split
    # if resuming, try to extract train dataset from log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, train_dataset = c.checkpoint(
            c.is_dataset_created, path, "_train", debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if train_dataset is None:
        dataset_split_args = r.set_dataset_split_args(
            "%s - train (%s %%)" % (name, int(sample_rate * 100)),
            description, args, sample_rate, out_of_bag=False)
        train_dataset = r.create_dataset(
            dataset, dataset_split_args, args, api, path, session_file,
            log, "train")
        if train_dataset:
            train_dataset = r.get_dataset(train_dataset, api, args.verbosity,
                                          session_file)

    # if resuming, try to extract test dataset from log files
    if resume:
        message = u.dated("Dataset not found. Resuming.\n")
        resume, test_dataset = c.checkpoint(
            c.is_dataset_created, path, "_test", debug=args.debug,
            message=message, log_file=session_file, console=args.verbosity)

    if test_dataset is None:
        dataset_split_args = r.set_dataset_split_args(
            "%s - test (%s %%)" % (name, int(args.test_split * 100)),
            description, args, sample_rate, out_of_bag=True)
        test_dataset = r.create_dataset(
            dataset, dataset_split_args, args, api, path, session_file,
            log, "test")
        if test_dataset:
            test_dataset = r.get_dataset(test_dataset, api, args.verbosity,
                                         session_file)

    return train_dataset, test_dataset, resume
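# Illustrative sketch (not part of the original module): the train and test
# splits above share the same sample_rate (1 - test_split); the train dataset
# keeps the sampled rows (out_of_bag=False) while the test dataset keeps the
# complementary ones (out_of_bag=True). In practice both splits would also
# share a sampling seed so they are deterministic complements; the helper
# below only shows the rate/out_of_bag pairing and its names are hypothetical.
def _example_split_args(test_split=0.2):
    """Sketch only: with test_split=0.2 both splits use sample_rate=0.8,
    differing only in the out_of_bag flag.
    """
    sample_rate = 1 - test_split
    train_args = {"sample_rate": sample_rate, "out_of_bag": False}
    test_args = {"sample_rate": sample_rate, "out_of_bag": True}
    return train_args, test_args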