def save_txt_and_json(object_dict, output, api=None):
    """Saves the contents of a dict object in txt and JSON formats

    """
    if api is None:
        # Default to a new connection when no API object is given,
        # mirroring save_evaluation below.
        api = bigml.api.BigML()
    open_mode = 'wt' if PYTHON3 else 'wb'
    message = json.dumps(object_dict)
    if not PYTHON3:
        message = utf8(message)
    with open(output + '.json', open_mode) as dict_json:
        dict_json.write(message)
    with open(output + '.txt', open_mode) as dict_txt:
        api.pprint(object_dict, dict_txt)
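

# Usage sketch (not part of the original module; names are hypothetical):
# assuming `api` is an authenticated bigml.api.BigML() connection and
# 'my_dir/report' an output prefix, the call below would write both
# 'my_dir/report.json' and 'my_dir/report.txt' with the dict contents:
#
#     save_txt_and_json({"models": 1, "datasets": 1}, 'my_dir/report',
#                       api=api)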


def save_evaluation(evaluation, output, api=None):
    """Creates the evaluation .txt and .json files

    """
    if api is None:
        api = bigml.api.BigML()
    evaluation_json = open(output + '.json', 'w', 0)
    evaluation = evaluation.get('object', evaluation).get('result', evaluation)
    evaluation_json.write(json.dumps(evaluation))
    evaluation_json.flush()
    evaluation_json.close()
    evaluation_txt = open(output + '.txt', 'w', 0)
    api.pprint(evaluation, evaluation_txt)
    evaluation_txt.flush()
    evaluation_txt.close()
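

# Usage sketch (hypothetical, for illustration only): given an evaluation
# resource previously retrieved with api.get_evaluation(), this would pick
# its 'result' section and save it to 'my_dir/evaluation.json' and
# 'my_dir/evaluation.txt':
#
#     evaluation = api.get_evaluation(evaluation_id)
#     save_evaluation(evaluation, 'my_dir/evaluation', api=api)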


def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None,
                   description=None,
                   field_attributes=None,
                   types=None,
                   dataset_fields=None,
                   model_fields=None,
                   name=None, training_set_header=True,
                   test_set_header=True, model_ids=None,
                   votes_files=None, resume=False, fields_map=None):
    """Creates one or more models using the `training_set` or uses the ids
    of previously created BigML models to make predictions for the
    `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs, the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    if (training_set or (args.evaluate and test_set)):
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If no previous source, dataset or model is provided, we create a new
    # source. Also, if --evaluate and test data are provided, we create a
    # new dataset to test with.
    data_set = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    if data_set is not None:
        source_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag,
            "source_parser": {"header": data_set_header}}
        message = u.dated("Creating source.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.create_source(data_set, source_args,
                                   progress_bar=args.progress_bar)
        source = api.check_resource(source, api.get_source)
        message = u.dated("Source created: %s\n" % u.get_url(source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.log_message("%s\n" % source['resource'], log_file=log)

        fields = Fields(source['object']['fields'],
                        source['object']['source_parser']['missing_tokens'],
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()

    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have a source, we check that it is finished, extract its
    # fields, and update them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
%s\n" % u.get_url(source, api)) u.log_message(message, log_file=session_file, console=args.verbosity) source = api.check_resource(source, api.get_source) csv_properties = {'missing_tokens': source['object']['source_parser']['missing_tokens'], 'data_locale': source['object']['source_parser']['locale']} fields = Fields(source['object']['fields'], **csv_properties) update_fields = {} if field_attributes: for (column, value) in field_attributes.iteritems(): update_fields.update({ fields.field_id(column): value}) message = u.dated("Updating source. %s\n" % u.get_url(source, api)) u.log_message(message, log_file=session_file, console=args.verbosity) source = api.update_source(source, {"fields": update_fields}) update_fields = {} if types: for (column, value) in types.iteritems(): update_fields.update({ fields.field_id(column): {'optype': value}}) message = u.dated("Updating source. %s\n" % u.get_url(source, api)) u.log_message(message, log_file=session_file, console=args.verbosity) source = api.update_source(source, {"fields": update_fields}) if (training_set or args.source or (args.evaluate and test_set)): if resume: resume, args.dataset = u.checkpoint(u.is_dataset_created, path, bigml.api, debug=args.debug) if not resume: message = u.dated("Dataset not found. Resuming.\n") u.log_message(message, log_file=session_file, console=args.verbosity) # If we have a source but not dataset or model has been provided, we # create a new dataset if the no_dataset option isn't set up. Also # if evaluate is set and test_set has been provided. if ((source and not args.dataset and not args.model and not model_ids and not args.no_dataset) or (args.evaluate and args.test_set and not args.dataset)): dataset_args = { "name": name, "description": description, "category": args.category, "tags": args.tag } if args.json_filter: dataset_args.update(json_filter=args.json_filter) elif args.lisp_filter: dataset_args.update(lisp_filter=args.lisp_filter) input_fields = [] if dataset_fields: for name in dataset_fields: input_fields.append(fields.field_id(name)) dataset_args.update(input_fields=input_fields) message = u.dated("Creating dataset.\n") u.log_message(message, log_file=session_file, console=args.verbosity) dataset = api.create_dataset(source, dataset_args) dataset = api.check_resource(dataset, api.get_dataset) message = u.dated("Dataset created: %s\n" % u.get_url(dataset, api)) u.log_message(message, log_file=session_file, console=args.verbosity) u.log_message("%s\n" % dataset['resource'], log_file=log) dataset_file = open(path + '/dataset', 'w', 0) dataset_file.write("%s\n" % dataset['resource']) dataset_file.flush() dataset_file.close() # If a dataset is provided, let's retrieve it. elif args.dataset: message = u.dated("Retrieving dataset. %s\n" % u.get_url(args.dataset, api)) u.log_message(message, log_file=session_file, console=args.verbosity) dataset = api.get_dataset(args.dataset) # If we already have a dataset, we check the status and get the fields if # we hadn't them yet. if dataset: if dataset['object']['status']['code'] != bigml.api.FINISHED: message = u.dated("Retrieving dataset. 
%s\n" % u.get_url(dataset, api)) u.log_message(message, log_file=session_file, console=args.verbosity) dataset = api.check_resource(dataset, api.get_dataset) if not csv_properties: csv_properties = {'data_locale': dataset['object']['locale']} if args.public_dataset: if not description: raise Exception("You should provide a description to publish.") public_dataset = {"private": False} if args.dataset_price: message = u.dated("Updating dataset. %s\n" % u.get_url(dataset, api)) u.log_message(message, log_file=session_file, console=args.verbosity) public_dataset.update(price=args.dataset_price) message = u.dated("Updating dataset. %s\n" % u.get_url(dataset, api)) u.log_message(message, log_file=session_file, console=args.verbosity) dataset = api.update_dataset(dataset, public_dataset) fields = Fields(dataset['object']['fields'], **csv_properties) # If we have a dataset but not a model, we create the model if the no_model # flag hasn't been set up. if (dataset and not args.model and not model_ids and not args.no_model): model_args = { "name": name, "description": description, "category": args.category, "tags": args.tag } if objective_field is not None: model_args.update({"objective_field": fields.field_id(objective_field)}) # If evaluate flag is on, we choose a deterministic sampling with 80% # of the data to create the model if args.evaluate: if args.sample_rate == 1: args.sample_rate = EVALUATE_SAMPLE_RATE seed = SEED model_args.update(seed=seed) input_fields = [] if model_fields: for name in model_fields: input_fields.append(fields.field_id(name)) model_args.update(input_fields=input_fields) if args.pruning and args.pruning != 'smart': model_args.update(stat_pruning=(args.pruning == 'statistical')) model_args.update(sample_rate=args.sample_rate, replacement=args.replacement, randomize=args.randomize) model_ids = [] models = [] if resume: resume, model_ids = u.checkpoint(u.are_models_created, path, args.number_of_models, bigml.api, debug=args.debug) if not resume: message = u.dated("Found %s models out of %s. Resuming.\n" % (len(model_ids), args.number_of_models)) u.log_message(message, log_file=session_file, console=args.verbosity) models = model_ids args.number_of_models -= len(model_ids) model_file = open(path + '/models', 'w', 0) for model_id in model_ids: model_file.write("%s\n" % model_id) last_model = None if args.number_of_models > 0: message = u.dated("Creating %s.\n" % u.plural("model", args.number_of_models)) u.log_message(message, log_file=session_file, console=args.verbosity) for i in range(1, args.number_of_models + 1): if i > args.max_parallel_models: api.check_resource(last_model, api.get_model) model = api.create_model(dataset, model_args) u.log_message("%s\n" % model['resource'], log_file=log) last_model = model model_ids.append(model['resource']) models.append(model) model_file.write("%s\n" % model['resource']) model_file.flush() if args.number_of_models < 2 and args.verbosity: if model['object']['status']['code'] != bigml.api.FINISHED: model = api.check_resource(model, api.get_model) models[0] = model message = u.dated("Model created: %s.\n" % u.get_url(model, api)) u.log_message(message, log_file=session_file, console=args.verbosity) model_file.close() # If a model is provided, we retrieve it. elif args.model: message = u.dated("Retrieving model. 
%s\n" % u.get_url(args.model, api)) u.log_message(message, log_file=session_file, console=args.verbosity) model = api.get_model(args.model) elif args.models or args.model_tag: models = model_ids[:] if model_ids and test_set and not args.evaluate: model_id = "" if len(model_ids) == 1: model_id = model_ids[0] message = u.dated("Retrieving %s. %s\n" % (u.plural("model", len(model_ids)), u.get_url(model_id, api))) u.log_message(message, log_file=session_file, console=args.verbosity) if len(model_ids) < args.max_batch_models: models = [] for model in model_ids: model = api.check_resource(model, api.get_model) models.append(model) model = models[0] else: model = api.check_resource(model_ids[0], api.get_model) models[0] = model # We check that the model is finished and get the fields if haven't got # them yet. if model and not args.evaluate and (test_set or args.black_box or args.white_box): if model['object']['status']['code'] != bigml.api.FINISHED: message = u.dated("Retrieving model. %s\n" % u.get_url(model, api)) u.log_message(message, log_file=session_file, console=args.verbosity) model = api.check_resource(model, api.get_model) if args.black_box: if not description: raise Exception("You should provide a description to publish.") model = api.update_model(model, {"private": False}) if args.white_box: if not description: raise Exception("You should provide a description to publish.") public_model = {"private": False, "white_box": True} if args.model_price: message = u.dated("Updating model. %s\n" % u.get_url(model, api)) u.log_message(message, log_file=session_file, console=args.verbosity) public_model.update(price=args.model_price) if args.cpp: message = u.dated("Updating model. %s\n" % u.get_url(model, api)) u.log_message(message, log_file=session_file, console=args.verbosity) public_model.update(credits_per_prediction=args.cpp) model = api.update_model(model, public_model) if not csv_properties: csv_properties = {'data_locale': model['object']['locale']} csv_properties.update(verbose=True) if args.user_locale: csv_properties.update(data_locale=args.user_locale) fields = Fields(model['object']['model']['fields'], **csv_properties) if model and not models: models = [model] if models and test_set and not args.evaluate: objective_field = models[0]['object']['objective_fields'] if isinstance(objective_field, list): objective_field = objective_field[0] predict(test_set, test_set_header, models, fields, output, objective_field, args.remote, api, log, args.max_batch_models, args.method, resume, args.tag, args.verbosity, session_file, args.debug) # When combine_votes flag is used, retrieve the predictions files saved # in the comma separated list of directories and combine them if votes_files: model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$', r'\1', votes_files[0]).replace("_", "/") model = api.check_resource(model_id, api.get_model) local_model = Model(model) message = u.dated("Combining votes.\n") u.log_message(message, log_file=session_file, console=args.verbosity) u.combine_votes(votes_files, local_model.to_prediction, output, args.method) # If evaluate flag is on, create remote evaluation and save results in # json and human-readable format. if args.evaluate: if resume: resume, evaluation = u.checkpoint(u.is_evaluation_created, path, bigml.api, debug=args.debug) if not resume: message = u.dated("Evaluation not found. 
Resuming.\n") u.log_message(message, log_file=session_file, console=args.verbosity) if not resume: evaluation_file = open(path + '/evaluation', 'w', 0) evaluation_args = { "name": name, "description": description, "tags": args.tag } if not fields_map is None: update_map = {} for (dataset_column, model_column) in fields_map.iteritems(): update_map.update({ fields.field_id(dataset_column): fields.field_id(model_column)}) evaluation_args.update({"fields_map": update_map}) if not ((args.dataset or args.test_set) and (args.model or args.models or args.model_tag)): evaluation_args.update(out_of_bag=True, seed=SEED, sample_rate=args.sample_rate) message = u.dated("Creating evaluation.\n") u.log_message(message, log_file=session_file, console=args.verbosity) evaluation = api.create_evaluation(model, dataset, evaluation_args) u.log_message("%s\n" % evaluation['resource'], log_file=log) evaluation_file.write("%s\n" % evaluation['resource']) evaluation_file.flush() evaluation_file.close() message = u.dated("Retrieving evaluation. %s\n" % u.get_url(evaluation, api)) u.log_message(message, log_file=session_file, console=args.verbosity) evaluation = api.check_resource(evaluation, api.get_evaluation) evaluation_json = open(output + '.json', 'w', 0) evaluation_json.write(json.dumps(evaluation['object']['result'])) evaluation_json.flush() evaluation_json.close() evaluation_txt = open(output + '.txt', 'w', 0) api.pprint(evaluation['object']['result'], evaluation_txt) evaluation_txt.flush() evaluation_txt.close() # Workaround to restore windows console cp850 encoding to print the tree if sys.platform == "win32" and sys.stdout.isatty(): import locale data_locale = locale.getlocale() if not data_locale[0] is None: locale.setlocale(locale.LC_ALL, (data_locale[0], "850")) message = (u"\nGenerated files:\n\n" + unicode(u.print_tree(path, " "), "utf-8") + u"\n") else: message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n" u.log_message(message, log_file=session_file, console=args.verbosity)