def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    # retrieve dataset
    dataset_id = bigml.api.get_dataset_id(args.dataset)
    if dataset_id:
        dataset = api.check_resource(dataset_id, api.get_dataset)
        # check that kfold_field is unique
        fields = Fields(dataset, {"objective_field": args.objective_field,
                                  "objective_field_present": True})
        objective_id = fields.field_id(fields.objective_field)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      fields.objective_field,
                                                      kfold_field_name,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, fields.field_column_number(objective_id), resume
    return None, None, None
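# Illustrative sketch (not the bigmler source): the kind of Flatline
# expressions a helper like create_kfold_json could emit. A new field tags
# each row with a fold number, and two filters select the test and training
# partitions for a given fold. The field name and k value are hypothetical.
KFOLD_FIELD = "__kfold__"
K = 5
new_fold_field = [{"name": KFOLD_FIELD,
                   "field": "(rand-int %s)" % K}]          # fold 0..K-1
test_filter = '(= (f "%s") %s)' % (KFOLD_FIELD, 0)         # rows in fold 0
training_filter = '(!= (f "%s") %s)' % (KFOLD_FIELD, 0)    # remaining rows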
def best_first_search(datasets_file, api, args, common_options,
                      staleness=None, penalty=None, objective_name=None,
                      resume=False):
    """Selecting the fields to be used in the model construction

    """
    counter = 0
    loop_counter = 0
    features_file = os.path.normpath(os.path.join(args.output_dir,
                                                  FEATURES_LOG))
    with open(features_file, u.open_mode("w")) as features_handler:
        features_writer = csv.writer(features_handler,
                                     lineterminator="\n")
        features_writer.writerow([
            "step", "state", "score", "metric_value", "best_score"])
        features_handler.flush()
        if staleness is None:
            staleness = DEFAULT_STALENESS
        if penalty is None:
            penalty = DEFAULT_PENALTY
        # retrieving the first dataset in the file
        try:
            with open(datasets_file, u.open_mode("r")) as datasets_handler:
                dataset_id = datasets_handler.readline().strip()
        except IOError as exc:
            sys.exit("Could not read the generated datasets file: %s" %
                     str(exc))
        try:
            stored_dataset = u.storage_file_name(args.output_dir, dataset_id)
            with open(stored_dataset, u.open_mode("r")) as dataset_handler:
                dataset = json.loads(dataset_handler.read())
        except IOError:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        # initial feature set
        fields = Fields(dataset)
        excluded_features = ([] if args.exclude_features is None else
                             args.exclude_features.split(
                                 args.args_separator))
        try:
            excluded_ids = [fields.field_id(feature) for
                            feature in excluded_features]
            objective_id = fields.field_id(objective_name)
        except ValueError as exc:
            sys.exit(exc)
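# Usage sketch for the Fields wrapper from the bigml Python bindings, as
# used above. The dataset id and field name are placeholders.
from bigml.api import BigML
from bigml.fields import Fields

api = BigML()
dataset = api.check_resource("dataset/0123456789abcdef01234567")
fields = Fields(dataset)                   # accepts a full dataset resource
objective_id = fields.field_id("species")  # name or column -> field id
print fields.field_name(objective_id)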
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    # retrieve dataset
    dataset_id = bigml.api.get_dataset_id(args.dataset)
    if dataset_id:
        dataset = api.check_resource(dataset_id)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in
        # the dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      objective_name,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, objective_name, resume
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    csv_properties = {}
    fields = None
    dataset = None
    dataset_id = None  # avoids a NameError when no dataset option is given
    datasets = []
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
        dataset_id = dataset['resource']
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]

    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in
        # the dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, objective_name, resume
sys.exit("Could not read the generated datasets file: %s" % str(exc)) try: stored_dataset = u.storage_file_name(args.output_dir, dataset_id) with open(stored_dataset, u.open_mode("r")) as dataset_handler: dataset = json.loads(dataset_handler.read()) except IOError: dataset = api.check_resource(dataset_id, query_string=ALL_FIELDS_QS) # initial feature set fields = Fields(dataset) excluded_features = ([] if args.exclude_features is None else args.exclude_features.split( args.args_separator)) try: excluded_ids = [fields.field_id(feature) for feature in excluded_features] objective_id = fields.field_id(objective_name) except ValueError, exc: sys.exit(exc) field_ids = [field_id for field_id in fields.preferred_fields() if field_id != objective_id and not field_id in excluded_ids] # headers are extended with a column per field fields_names = [fields.field_name(field_id) for field_id in field_ids] features_header.extend(fields_names) features_writer.writerow(features_header) initial_state = [False for field_id in field_ids] open_list = [(initial_state, - float('inf'), -float('inf'), 0)] closed_list = [] best_state, best_score, best_metric_value, best_counter = open_list[0]
def create_kfold_datasets_file(args, api, command_obj, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file,
                  console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    csv_properties = {}
    fields = None
    dataset = None
    dataset_id = None  # avoids a NameError when no dataset option is given
    datasets = []
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
        dataset_id = dataset['resource']
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]

    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in
        # the dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object']['objective_field'][
                    'column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      command_obj,
                                                      resume=resume)
        return datasets_file, objective_name, resume
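# Hypothetical sketch of the u.read_datasets helper assumed above: the
# datasets file stores one dataset id per line, so reading it back is a
# simple line scan. The real bigmler utility may differ.
def read_datasets(path):
    """Read a list of dataset ids from a file, one per line."""
    with open(path) as handler:
        return [line.strip() for line in handler if line.strip()]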
                dataset_id = datasets_handler.readline().strip()
        except IOError as exc:
            sys.exit("Could not read the generated datasets file: %s" %
                     str(exc))
        try:
            stored_dataset = u.storage_file_name(args.output_dir, dataset_id)
            with open(stored_dataset, u.open_mode("r")) as dataset_handler:
                dataset = json.loads(dataset_handler.read())
        except IOError:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        # initial feature set
        fields = Fields(dataset)
        excluded_features = ([] if args.exclude_features is None else
                             args.exclude_features.split(
                                 args.args_separator))
        try:
            excluded_ids = [fields.field_id(feature) for
                            feature in excluded_features]
            objective_id = fields.field_id(objective_name)
        except ValueError as exc:
            sys.exit(exc)
        field_ids = [field_id for field_id in fields.preferred_fields()
                     if field_id != objective_id and
                     field_id not in excluded_ids]
        field_ids.sort()
        # headers are extended with a column per field
        fields_names = [fields.field_name(field_id)
                        for field_id in field_ids]
        features_header.extend(fields_names)
        features_writer.writerow(features_header)
        initial_state = [False for field_id in field_ids]
        open_list = [(initial_state, -float('inf'), -float('inf'), 0)]
def compute_output(api, args, training_set, test_set=None, output=None,
                   objective_field=None, description=None,
                   field_attributes=None, types=None, dataset_fields=None,
                   model_fields=None, name=None, training_set_header=True,
                   test_set_header=True, model_ids=None, votes_files=None,
                   resume=False, fields_map=None):
    """Creates one or more models using the `training_set` or uses the ids
       of previously created BigML models to make predictions for the
       `test_set`.

    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required, open the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        if args.clear_logs:
            try:
                open(log, 'w', 0).close()
            except IOError:
                pass

    if (training_set or (args.evaluate and test_set)):
        if resume:
            resume, args.source = u.checkpoint(u.is_source_created, path,
                                               bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Source not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If neither a previous source, dataset nor model is provided, we create
    # a new one. Also if --evaluate and test data are provided we create a
    # new dataset to test with.
    data_set = None
    if (training_set and not args.source and not args.dataset and
            not args.model and not args.models):
        data_set = training_set
        data_set_header = training_set_header
    elif (args.evaluate and test_set and not args.source):
        data_set = test_set
        data_set_header = test_set_header

    if data_set is not None:
        source_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag,
            "source_parser": {"header": data_set_header}}
        message = u.dated("Creating source.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.create_source(data_set, source_args,
                                   progress_bar=args.progress_bar)
        source = api.check_resource(source, api.get_source)
        message = u.dated("Source created: %s\n" % u.get_url(source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.log_message("%s\n" % source['resource'], log_file=log)

        fields = Fields(source['object']['fields'],
                        source['object']['source_parser']['missing_tokens'],
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()
    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have a source, we check that it is finished, extract
    # the fields, and update them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.check_resource(source, api.get_source)
        csv_properties = {
            'missing_tokens':
            source['object']['source_parser']['missing_tokens'],
            'data_locale': source['object']['source_parser']['locale']}

        fields = Fields(source['object']['fields'], **csv_properties)
        update_fields = {}
        if field_attributes:
            for (column, value) in field_attributes.iteritems():
                update_fields.update({
                    fields.field_id(column): value})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

        update_fields = {}
        if types:
            for (column, value) in types.iteritems():
                update_fields.update({
                    fields.field_id(column): {'optype': value}})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

    if (training_set or args.source or (args.evaluate and test_set)):
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)

    # If we have a source but no dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids and
            not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
        dataset_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag}

        if args.json_filter:
            dataset_args.update(json_filter=args.json_filter)
        elif args.lisp_filter:
            dataset_args.update(lisp_filter=args.lisp_filter)

        input_fields = []
        if dataset_fields:
            for name in dataset_fields:
                input_fields.append(fields.field_id(name))
            dataset_args.update(input_fields=input_fields)
        message = u.dated("Creating dataset.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        dataset = api.create_dataset(source, dataset_args)
        dataset = api.check_resource(dataset, api.get_dataset)
        message = u.dated("Dataset created: %s\n" % u.get_url(dataset, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.log_message("%s\n" % dataset['resource'], log_file=log)
        dataset_file = open(path + '/dataset', 'w', 0)
        dataset_file.write("%s\n" % dataset['resource'])
        dataset_file.flush()
        dataset_file.close()
    # If a dataset is provided, let's retrieve it.
    elif args.dataset:
        message = u.dated("Retrieving dataset. %s\n" %
                          u.get_url(args.dataset, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        dataset = api.get_dataset(args.dataset)

    # If we already have a dataset, we check the status and get the fields
    # if we didn't have them yet.
    if dataset:
        if dataset['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.check_resource(dataset, api.get_dataset)
        if not csv_properties:
            csv_properties = {'data_locale': dataset['object']['locale']}
        if args.public_dataset:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            public_dataset = {"private": False}
            if args.dataset_price:
                message = u.dated("Updating dataset. %s\n" %
                                  u.get_url(dataset, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_dataset.update(price=args.dataset_price)
            message = u.dated("Updating dataset. %s\n" %
                              u.get_url(dataset, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            dataset = api.update_dataset(dataset, public_dataset)
        fields = Fields(dataset['object']['fields'], **csv_properties)

    # If we have a dataset but not a model, we create the model if the
    # no_model flag hasn't been set up.
    if (dataset and not args.model and not model_ids and not args.no_model):
        model_args = {
            "name": name,
            "description": description,
            "category": args.category,
            "tags": args.tag}
        if objective_field is not None:
            model_args.update(
                {"objective_field": fields.field_id(objective_field)})
        # If evaluate flag is on, we choose a deterministic sampling with
        # 80% of the data to create the model
        if args.evaluate:
            if args.sample_rate == 1:
                args.sample_rate = EVALUATE_SAMPLE_RATE
            seed = SEED
            model_args.update(seed=seed)

        input_fields = []
        if model_fields:
            for name in model_fields:
                input_fields.append(fields.field_id(name))
            model_args.update(input_fields=input_fields)

        if args.pruning and args.pruning != 'smart':
            model_args.update(stat_pruning=(args.pruning == 'statistical'))

        model_args.update(sample_rate=args.sample_rate,
                          replacement=args.replacement,
                          randomize=args.randomize)
        model_ids = []
        models = []
        if resume:
            resume, model_ids = u.checkpoint(u.are_models_created, path,
                                             args.number_of_models,
                                             bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Found %s models out of %s. Resuming.\n" %
                                  (len(model_ids), args.number_of_models))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
            models = model_ids
            args.number_of_models -= len(model_ids)

        model_file = open(path + '/models', 'w', 0)
        for model_id in model_ids:
            model_file.write("%s\n" % model_id)
        last_model = None
        if args.number_of_models > 0:
            message = u.dated("Creating %s.\n" %
                              u.plural("model", args.number_of_models))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            for i in range(1, args.number_of_models + 1):
                if i > args.max_parallel_models:
                    api.check_resource(last_model, api.get_model)
                model = api.create_model(dataset, model_args)
                u.log_message("%s\n" % model['resource'], log_file=log)
                last_model = model
                model_ids.append(model['resource'])
                models.append(model)
                model_file.write("%s\n" % model['resource'])
                model_file.flush()
            if args.number_of_models < 2 and args.verbosity:
                if model['object']['status']['code'] != bigml.api.FINISHED:
                    model = api.check_resource(model, api.get_model)
                    models[0] = model
                message = u.dated("Model created: %s.\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        model_file.close()
    # If a model is provided, we retrieve it.
    elif args.model:
        message = u.dated("Retrieving model. %s\n" %
                          u.get_url(args.model, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        model = api.get_model(args.model)
    elif args.models or args.model_tag:
        models = model_ids[:]

    if model_ids and test_set and not args.evaluate:
        model_id = ""
        if len(model_ids) == 1:
            model_id = model_ids[0]
        message = u.dated("Retrieving %s. %s\n" %
                          (u.plural("model", len(model_ids)),
                           u.get_url(model_id, api)))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        if len(model_ids) < args.max_batch_models:
            models = []
            for model in model_ids:
                model = api.check_resource(model, api.get_model)
                models.append(model)
            model = models[0]
        else:
            model = api.check_resource(model_ids[0], api.get_model)
            models[0] = model

    # We check that the model is finished and get the fields if we haven't
    # got them yet.
    if model and not args.evaluate and (test_set or args.black_box or
                                        args.white_box):
        if model['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving model. %s\n" %
                              u.get_url(model, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            model = api.check_resource(model, api.get_model)
        if args.black_box:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            model = api.update_model(model, {"private": False})
        if args.white_box:
            if not description:
                raise Exception("You should provide a description to "
                                "publish.")
            public_model = {"private": False, "white_box": True}
            if args.model_price:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(price=args.model_price)
            if args.cpp:
                message = u.dated("Updating model. %s\n" %
                                  u.get_url(model, api))
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
                public_model.update(credits_per_prediction=args.cpp)
            model = api.update_model(model, public_model)
        if not csv_properties:
            csv_properties = {'data_locale': model['object']['locale']}
        csv_properties.update(verbose=True)
        if args.user_locale:
            csv_properties.update(data_locale=args.user_locale)

        fields = Fields(model['object']['model']['fields'], **csv_properties)

    if model and not models:
        models = [model]

    if models and test_set and not args.evaluate:
        objective_field = models[0]['object']['objective_fields']
        if isinstance(objective_field, list):
            objective_field = objective_field[0]
        predict(test_set, test_set_header, models, fields, output,
                objective_field, args.remote, api, log,
                args.max_batch_models, args.method, resume, args.tag,
                args.verbosity, session_file, args.debug)

    # When the combine_votes flag is used, retrieve the predictions files
    # saved in the comma-separated list of directories and combine them
    if votes_files:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', votes_files[0]).replace("_", "/")
        model = api.check_resource(model_id, api.get_model)
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        u.combine_votes(votes_files, local_model.to_prediction,
                        output, args.method)

    # If evaluate flag is on, create a remote evaluation and save the
    # results in json and human-readable formats.
    if args.evaluate:
        if resume:
            resume, evaluation = u.checkpoint(u.is_evaluation_created, path,
                                              bigml.api, debug=args.debug)
            if not resume:
                message = u.dated("Evaluation not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
        if not resume:
            evaluation_file = open(path + '/evaluation', 'w', 0)
            evaluation_args = {
                "name": name,
                "description": description,
                "tags": args.tag}
            if fields_map is not None:
                update_map = {}
                for (dataset_column, model_column) in fields_map.iteritems():
                    update_map.update({
                        fields.field_id(dataset_column):
                        fields.field_id(model_column)})
                evaluation_args.update({"fields_map": update_map})
            if not ((args.dataset or args.test_set) and
                    (args.model or args.models or args.model_tag)):
                evaluation_args.update(out_of_bag=True, seed=SEED,
                                       sample_rate=args.sample_rate)
            message = u.dated("Creating evaluation.\n")
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            evaluation = api.create_evaluation(model, dataset,
                                               evaluation_args)
            u.log_message("%s\n" % evaluation['resource'], log_file=log)
            evaluation_file.write("%s\n" % evaluation['resource'])
            evaluation_file.flush()
            evaluation_file.close()
        message = u.dated("Retrieving evaluation. %s\n" %
                          u.get_url(evaluation, api))
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        evaluation = api.check_resource(evaluation, api.get_evaluation)
        evaluation_json = open(output + '.json', 'w', 0)
        evaluation_json.write(json.dumps(evaluation['object']['result']))
        evaluation_json.flush()
        evaluation_json.close()
        evaluation_txt = open(output + '.txt', 'w', 0)
        api.pprint(evaluation['object']['result'], evaluation_txt)
        evaluation_txt.flush()
        evaluation_txt.close()

    # Workaround to restore the Windows console cp850 encoding to print
    # the tree
    if sys.platform == "win32" and sys.stdout.isatty():
        import locale
        data_locale = locale.getlocale()
        if data_locale[0] is not None:
            locale.setlocale(locale.LC_ALL, (data_locale[0], "850"))
        message = (u"\nGenerated files:\n\n" +
                   unicode(u.print_tree(path, " "), "utf-8") + u"\n")
    else:
        message = "\nGenerated files:\n\n" + u.print_tree(path, " ") + "\n"
    u.log_message(message, log_file=session_file, console=args.verbosity)
sys.exit("Could not read the generated datasets file: %s" % str(exc)) try: stored_dataset = u.storage_file_name(args.output_dir, dataset_id) with open(stored_dataset, u.open_mode("r")) as dataset_handler: dataset = json.loads(dataset_handler.read()) except IOError: dataset = api.check_resource(dataset_id, query_string=ALL_FIELDS_QS) # initial feature set fields = Fields(dataset) excluded_features = ([] if args.exclude_features is None else args.exclude_features.split( args.args_separator)) try: excluded_ids = [fields.field_id(feature) for feature in excluded_features] objective_id = fields.field_id(objective_name) except ValueError, exc: sys.exit(exc) field_ids = [field_id for field_id in fields.preferred_fields() if field_id != objective_id and not field_id in excluded_ids] field_ids.sort() # headers are extended with a column per field fields_names = [fields.field_name(field_id) for field_id in field_ids] features_header.extend(fields_names) features_writer.writerow(features_header) initial_state = [False for field_id in field_ids] open_list = [(initial_state, - float('inf'), -float('inf'), 0)] closed_list = []
def SymptomInsert(self, model):
    session = model.key.get()
    if session is None:
        raise endpoints.NotFoundException('Session not found.')
    if session.symptoms is None:
        session.symptoms = Symptoms()
    for s in session.symptoms.items:
        if s.name == model.name:
            s.value = model.value
            break
    else:
        # for/else: no existing symptom matched, so store a new one
        symptom = Symptom(name=model.name, value=model.value)
        session.symptoms.items.append(symptom)
    logging.debug('starting prediction')
    p = {}
    for symptom in session.symptoms.items:
        p[symptom.name] = symptom.value
    bigml_local_model = bigml_model.get_local_model()
    prediction = bigml_local_model.predict(p,
                                           add_confidence=True,
                                           add_path=True,
                                           add_distribution=True,
                                           add_count=True,
                                           add_next=True)
    prediction_all = bigml_local_model.predict(p, multiple=5)
    if prediction['next'] is not None:
        logging.debug('got fields %s' % bigml_local_model.fields)
        fields = Fields(bigml_local_model.fields)
        field_id = fields.field_id(prediction['next'])
        field = bigml_local_model.fields[field_id]
        if 'label' in field:
            label = field['label']
        else:
            label = field['name']
        if 'description' in field:
            description = field['description']
        else:
            description = ''
        if 'categories' in field['summary']:
            cat = []
            for c in field['summary']['categories']:
                cat.append(c[0])
            session.next = Question(label=label, description=description,
                                    type=field['optype'], categories=cat)
        else:
            session.next = Question(label=label, description=description,
                                    type=field['optype'])
    else:
        session.next = None
    session.outcome = Outcome(name=prediction['prediction'],
                              confidence=str(prediction['confidence']),
                              full=prediction_all)
    session.put()
    return session
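# Hypothetical sketch of the bigml_model.get_local_model helper assumed
# above: build (and cache) a local bigml.model.Model so predictions run
# in-process instead of hitting the API on every request. The model id is
# a placeholder.
from bigml.api import BigML
from bigml.model import Model

_LOCAL_MODEL = None

def get_local_model(model_id="model/0123456789abcdef01234567"):
    global _LOCAL_MODEL
    if _LOCAL_MODEL is None:
        api = BigML()
        _LOCAL_MODEL = Model(api.check_resource(model_id, api.get_model))
    return _LOCAL_MODEL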
def main(args=sys.argv[1:]):
    """Parses command-line parameters and runs the dataset analysis.

    """
    parser = argparse.ArgumentParser(
        description="Dataset analysis",
        epilog="BigML, Inc")

    # source with activity data
    parser.add_argument('--source',
                        action='store',
                        dest='source',
                        default=None,
                        help="Full path to file")

    # create private links or not
    parser.add_argument('--share',
                        action='store_true',
                        default=False,
                        help="Share created resources or not")

    # weight models or not
    parser.add_argument('--balance',
                        action='store_true',
                        default=False,
                        help="Weight model or not")

    args = parser.parse_args(args)

    if not args.source:
        sys.exit("You need to provide a valid path to a source")

    api = BigML()

    name = "Sean's activity"
    log("Creating source...")
    source_args = {'name': name}
    source = api.create_source(args.source, source_args)
    if not api.ok(source):
        sys.exit("Source isn't ready...")

    log("Creating dataset...")
    dataset = api.create_dataset(source)
    if not api.ok(dataset):
        sys.exit("Dataset isn't ready...")

    log("Transforming dataset...")
    # Extends dataset with new fields for previous activity, previous
    # duration, start day, and start hour. Removes first column, start,
    # and end fields.
    new_dataset_args = {
        'name': name,
        'new_fields': new_fields(),
        'all_but': excluded_fields()}
    new_dataset = api.create_dataset(dataset, new_dataset_args)
    if not api.ok(new_dataset):
        sys.exit("Dataset isn't ready...")

    # Set objective field to activity
    fields = Fields(new_dataset['object']['fields'])
    objective_id = fields.field_id('activity')
    new_dataset_args = {
        'objective_field': {'id': objective_id}}
    new_dataset = api.update_dataset(new_dataset, new_dataset_args)

    # Create training and test set for evaluation
    log("Splitting dataset...")
    training, test = train_test_split(api, new_dataset)

    log("Creating a model using the training dataset...")
    model_args = {
        'objective_field': objective_id,
        'balance_objective': args.balance,
        'name': training['object']['name']}
    model = api.create_model(training, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")

    # Creating an evaluation
    log("Evaluating model against the test dataset...")
    eval_args = {
        'name': name + ' - 80% vs 20%'}
    evaluation = api.create_evaluation(model, test, eval_args)
    if not api.ok(evaluation):
        sys.exit("Evaluation isn't ready...")

    log("Creating model for the full dataset...")
    model = api.create_model(new_dataset, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")

    # Create private links
    if args.share:
        log("Sharing resources...")
        dataset_private_link = share_dataset(api, new_dataset)
        model_private_link = share_model(api, model)
        evaluation_private_link = share_evaluation(api, evaluation)

        log(dataset_private_link)
        log(model_private_link)
        log(evaluation_private_link)
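# Hypothetical sketch of the train_test_split helper used above: a
# deterministic 80/20 split built from two sampled datasets that share the
# same seed, with the test set taken from the out-of-bag rows. The real
# helper in the original script may differ.
def train_test_split(api, dataset, rate=0.8, seed="activity analysis"):
    split_args = {"sample_rate": rate, "seed": seed}
    training = api.create_dataset(dataset, split_args)
    split_args.update({"out_of_bag": True})
    test = api.create_dataset(dataset, split_args)
    api.ok(training)
    api.ok(test)
    return training, test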
def best_first_search(datasets_file, api, args, common_options,
                      staleness=None, penalty=None, objective_name=None,
                      resume=False):
    """Selecting the fields to be used in the model construction

    """
    counter = 0
    loop_counter = 0
    features_file = os.path.normpath(os.path.join(args.output_dir,
                                                  FEATURES_LOG))
    with open(features_file, 'w', 0) as features_handler:
        features_writer = csv.writer(features_handler,
                                     lineterminator="\n")
        features_writer.writerow([
            "step", "state", "score", "metric_value", "best_score"])
        features_handler.flush()
        if staleness is None:
            staleness = DEFAULT_STALENESS
        if penalty is None:
            penalty = DEFAULT_PENALTY
        # retrieving the first dataset in the file
        try:
            with open(datasets_file) as datasets_handler:
                dataset_id = datasets_handler.readline().strip()
        except IOError as exc:
            sys.exit("Could not read the generated datasets file: %s" %
                     str(exc))
        dataset = api.check_resource(dataset_id, api.get_dataset)
        # initial feature set
        fields = Fields(dataset)
        excluded_features = ([] if args.exclude_features is None else
                             args.exclude_features.split(
                                 args.args_separator))
        excluded_ids = [fields.field_id(feature) for
                        feature in excluded_features]
        objective_id = fields.field_id(objective_name)
        field_ids = [field_id for field_id in fields.preferred_fields()
                     if field_id != objective_id and
                     field_id not in excluded_ids]
        initial_state = [False for field_id in field_ids]
        open_list = [(initial_state, -float('inf'), -float('inf'))]
        closed_list = []
        best_state, best_score, best_metric_value = open_list[0]
        best_unchanged_count = 0
        metric = args.maximize
        while best_unchanged_count < staleness and open_list:
            loop_counter += 1
            features_set = find_max_state(open_list)
            state, score, metric_value = features_set
            features_writer.writerow([
                loop_counter, [int(in_set) for in_set in state],
                score, metric_value, best_score])
            features_handler.flush()
            state_fields = [fields.field_name(field_ids[index])
                            for (index, in_set) in enumerate(state)
                            if in_set]
            closed_list.append(features_set)
            open_list.remove(features_set)
            if (score - EPSILON) > best_score:
                best_state, best_score, best_metric_value = features_set
                best_unchanged_count = 0
                if state_fields:
                    message = 'New best state: %s\n' % (state_fields)
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)
                    if metric in PERCENT_EVAL_METRICS:
                        message = '%s = %0.2f%% (score = %s)\n' % (
                            metric.capitalize(), metric_value * 100, score)
                    else:
                        message = '%s = %f (score = %s)\n' % (
                            metric.capitalize(), metric_value, score)
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)
            else:
                best_unchanged_count += 1

            children = expand_state(state)
            for child in children:
                if (child not in [state for state, _, _ in open_list] and
                        child not in [state for state, _, _ in closed_list]):
                    input_fields = [fields.field_name(field_id)
                                    for (i, field_id)
                                    in enumerate(field_ids) if child[i]]
                    # create models and evaluation with input_fields
                    args.model_fields = args.args_separator.join(
                        input_fields)
                    counter += 1
                    (score, metric_value,
                     metric, resume) = kfold_evaluate(datasets_file,
                                                      args, counter,
                                                      common_options,
                                                      penalty=penalty,
                                                      resume=resume,
                                                      metric=metric)
                    open_list.append((child, score, metric_value))

        best_features = [fields.field_name(field_ids[i]) for (i, score)
                         in enumerate(best_state) if score]
        message = (u'The best feature subset is: %s \n'
                   % u", ".join(best_features))
        u.log_message(message, log_file=session_file, console=1)
        if metric in PERCENT_EVAL_METRICS:
            message = (u'%s = %0.2f%%\n' % (metric.capitalize(),
                                            (best_metric_value * 100)))
        else:
            message = (u'%s = %f\n' % (metric.capitalize(),
                                       best_metric_value))
        u.log_message(message, log_file=session_file, console=1)
        message = (u'Evaluated %d/%d feature subsets\n' %
                   ((len(open_list) + len(closed_list)),
                    2 ** len(field_ids)))
        u.log_message(message, log_file=session_file, console=1)
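# Hypothetical sketches of the find_max_state and expand_state helpers used
# by best_first_search above; they are not defined in this excerpt and the
# real bigmler implementations may differ. States are boolean lists, and
# open-list entries are tuples whose second item is the score.
def find_max_state(states):
    """Return the open-list entry with the highest score."""
    return max(states, key=lambda item: item[1])

def expand_state(parent_state):
    """Return all children of a state: each child switches on one more
       feature that the parent leaves out."""
    children = []
    for i, in_set in enumerate(parent_state):
        if not in_set:
            child = parent_state[:]
            child[i] = True
            children.append(child)
    return children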
""" counter = 0 if staleness is None: staleness = DEFAULT_STALENESS if penalty is None: penalty = DEFAULT_PENALTY # retrieving the first dataset in the file try: with open(datasets_file) as datasets_handler: dataset_id = datasets_handler.readline().strip() except IOError, exc: sys.exit("Could not read the generated datasets file: %s" % str(exc)) dataset = api.check_resource(dataset_id, api.get_dataset) # initial feature set fields = Fields(dataset) objective_id = fields.field_id(objective_column) field_ids = [field_id for field_id in fields.preferred_fields() if field_id != objective_id] initial_state = [False for field_id in field_ids] open_list = [(initial_state,0)] closed_list = [] best_score = -1 best_unchanged_count = 0 metric = args.maximize while best_unchanged_count < staleness and open_list: (state, score) = find_max_state(open_list) state_fields = [fields.field_name(field_ids[i]) for (i, val) in enumerate(state) if val] closed_list.append((state, score)) open_list.remove((state, score)) if (score - EPSILON) > best_score: