def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    Reads ids from the --datasets file and from --dataset-tag listings,
    storing them in ``command_args.dataset_ids``. When exactly one id
    results, it is also set as ``command_args.dataset``.
    """
    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids
    # Reading test dataset ids is delayed till the very moment of use to
    # ensure that the newly generated resources files can be used there too
    command_args.test_dataset_ids = []
    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        # BUG FIX: the original rebound ``dataset_ids`` to the result of
        # list.extend, which is always None (and crashed when --datasets was
        # not used, since dataset_ids was still None). Extend an existing
        # list in place instead.
        if dataset_ids is None:
            dataset_ids = []
        dataset_ids.extend(
            u.list_ids(api.list_datasets,
                       "tags__in=%s" % command_args.dataset_tag))
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids
def get_dataset_info(api, args, resume, source, csv_properties,
                     fields, session_file, path, log):
    """Creating or retrieving the dataset, test_dataset and related information

    Resolution order: a local JSON dataset file (--dataset-file), an
    explicit list of dataset ids (--datasets), or a dataset built remotely
    from `source`. Optional post-processing: CSV export, train/test split,
    multi-dataset merge and new-fields dataset generation.

    Returns the tuple (dataset, datasets, test_dataset, resume,
    csv_properties, fields).
    """
    dataset = None
    datasets = None
    test_dataset = None
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            session_file=session_file, path=path, log=log)
    if datasets:
        # The first dataset in the list is the working dataset.
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure
    if args.new_fields:
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
    # Optionally dump a CSV summary of the final fields structure.
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    return dataset, datasets, test_dataset, resume, csv_properties, fields
def create_kfold_evaluations(datasets_file, args, command_obj,
                             resume=False, counter=0):
    """ Create k-fold cross-validation from a datasets file

    Builds a `main` subcommand line that evaluates the models created for
    fold `counter`, dispatches it (honouring --resume by replaying the
    logged subcommands) and reads back the generated evaluation.json.

    Returns the tuple (evaluation, resume); exits the process if the
    evaluation file cannot be read or parsed.
    """
    global subcommand_list
    # Each fold writes into its own numbered output directory.
    output_dir = os.path.normpath(
        u.check_dir(os.path.join(u"%s%s" % (args.output_dir, counter),
                                 u"evaluation.json")))
    model_fields = args.model_fields
    # Suffix the resource name with the fold number, trimming the base name
    # so the final name stays within NAME_MAX_LENGTH.
    name_suffix = "_subset_%s" % counter
    name_max_length = NAME_MAX_LENGTH - len(name_suffix)
    name = "%s%s" % (args.name[0: name_max_length], name_suffix)
    dataset_id = u.read_datasets(datasets_file)[0]
    model_dataset = os.path.normpath(
        os.path.join(u.check_dir(datasets_file), dataset_id.replace("/", "_")))
    command = COMMANDS["create_cv"] % (datasets_file, output_dir, name,
                                       model_dataset)
    command_args = command.split()
    if model_fields:
        command_args.append("--model-fields")
        command_args.append(model_fields)
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    """
    common_options_list = u.get_options_list(args, command_obj.common_options,
                                             prioritary=command_args)
    command_args.extend(common_options_list)
    """
    # Propagate the user's options to the subcommand, except the
    # dataset-related ones which are fixed by the procedure itself.
    command_obj.propagate(command_args, exclude=["--dataset",
                                                 "--datasets",
                                                 "--dataset-file"])
    command = rebuild_command(command_args)
    if resume:
        # Replay the stored subcommand log; leave resume mode as soon as
        # the logged command differs or the log is exhausted.
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(os.path.join(output_dir,
                                                    "evaluation.json"))
    try:
        with open(evaluation_file) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")
def get_test_dataset(args):
    """Returns the dataset id from one of the possible user options:
       --test-dataset --test-datasets
    """
    args.test_dataset_ids = []
    try:
        # When a --test-datasets file is given, read every id it lists.
        if args.test_datasets:
            args.test_dataset_ids = u.read_datasets(args.test_datasets)
    except AttributeError:
        # Some subcommands do not define the test_datasets option at all.
        pass
    # --test-dataset takes precedence; otherwise fall back to the first id
    # read from the file, or None when neither option was used.
    if args.test_dataset is not None:
        return args.test_dataset
    if args.test_dataset_ids:
        return args.test_dataset_ids[0]
    return None
def transform_dataset_options(command_args, api):
    """Retrieves the dataset ids from the different input options

    Fills ``command_args.dataset_ids`` from --datasets files, --dataset-tag
    listings or a ``datasets_json`` literal, setting ``command_args.dataset``
    to the last id found. Missing options are tolerated silently, as not
    every subcommand defines them.
    """
    command_args.dataset_ids = []
    command_args.test_dataset_ids = []
    dataset_ids = None
    try:
        # Parses dataset/id if provided.
        if command_args.datasets:
            dataset_ids = u.read_datasets(command_args.datasets)
            if len(dataset_ids) > 0:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass
    # Reading test dataset ids is delayed till the very moment of use to
    # ensure that the newly generated resources files can be used there too
    try:
        # Retrieve dataset/ids if provided.
        if command_args.dataset_tag:
            # BUG FIX: the original rebound ``dataset_ids`` to the result of
            # list.extend (always None), losing the tagged ids. Extend an
            # existing list in place instead.
            if dataset_ids is None:
                dataset_ids = []
            dataset_ids.extend(
                u.list_ids(api.list_datasets,
                           "tags__in=%s" % command_args.dataset_tag))
            if len(dataset_ids) > 0:
                command_args.dataset = dataset_ids[-1]
            command_args.dataset_ids = dataset_ids
    except Exception:
        pass
    # if datasets_json is set, read its info in datasets
    try:
        if hasattr(command_args, 'datasets_json') \
                and command_args.datasets_json:
            command_args.dataset_ids = json.loads(command_args.datasets_json)
            # BUG FIX: the original tested ``len(dataset_ids)`` (the local,
            # possibly None) instead of the just-parsed list, raising an
            # uncaught TypeError when --datasets was not used.
            if len(command_args.dataset_ids) > 0:
                command_args.dataset = command_args.dataset_ids[-1]
    except AttributeError:
        pass
def transform_args(command_args, flags, api, user_defaults): """Transforms the formatted argument strings into structured arguments """ # Parses attributes in json format if provided command_args.json_args = {} json_attribute_options = { 'source': command_args.source_attributes, 'dataset': command_args.dataset_attributes, 'model': command_args.model_attributes, 'ensemble': command_args.ensemble_attributes, 'evaluation': command_args.evaluation_attributes, 'batch_prediction': command_args.batch_prediction_attributes} for resource_type, attributes_file in json_attribute_options.items(): if attributes_file is not None: command_args.json_args[resource_type] = u.read_json( attributes_file) else: command_args.json_args[resource_type] = {} # Parses dataset generators in json format if provided if command_args.new_fields: json_generators = u.read_json(command_args.new_fields) command_args.dataset_json_generators = json_generators else: command_args.dataset_json_generators = {} # Parses multi-dataset attributes in json such as field maps if command_args.multi_dataset_attributes: multi_dataset_json = u.read_json(command_args.multi_dataset_attributes) command_args.multi_dataset_json= multi_dataset_json else: command_args.multi_dataset_json = {} dataset_ids = None command_args.dataset_ids = [] # Parses dataset/id if provided. if command_args.datasets: dataset_ids = u.read_datasets(command_args.datasets) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids test_dataset_ids = None command_args.test_dataset_ids = [] # Parses dataset/id if provided. if command_args.test_datasets: test_dataset_ids = u.read_datasets(command_args.test_datasets) command_args.test_dataset_ids = test_dataset_ids # Retrieve dataset/ids if provided. 
if command_args.dataset_tag: dataset_ids = dataset_ids.extend( u.list_ids(api.list_datasets, "tags__in=%s" % command_args.dataset_tag)) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids # Reads a json filter if provided. if command_args.json_filter: json_filter = u.read_json_filter(command_args.json_filter) command_args.json_filter = json_filter # Reads a lisp filter if provided. if command_args.lisp_filter: lisp_filter = u.read_lisp_filter(command_args.lisp_filter) command_args.lisp_filter = lisp_filter # Adds default tags unless that it is requested not to do so. if command_args.no_tag: command_args.tag.append('BigMLer') command_args.tag.append('BigMLer_%s' % NOW) # Checks combined votes method if (command_args.method and command_args.method != COMBINATION_LABEL and not (command_args.method in COMBINATION_WEIGHTS.keys())): command_args.method = 0 else: combiner_methods = dict([[value, key] for key, value in COMBINER_MAP.items()]) combiner_methods[COMBINATION_LABEL] = COMBINATION command_args.method = combiner_methods.get(command_args.method, 0) # Checks missing_strategy if (command_args.missing_strategy and not (command_args.missing_strategy in MISSING_STRATEGIES.keys())): command_args.missing_strategy = 0 else: command_args.missing_strategy = MISSING_STRATEGIES.get( command_args.missing_strategy, 0) # Adds replacement=True if creating ensemble and nothing is specified if (command_args.number_of_models > 1 and not command_args.replacement and not '--no-replacement' in flags and not 'replacement' in user_defaults and not '--no-randomize' in flags and not 'randomize' in user_defaults and not '--sample-rate' in flags and not 'sample_rate' in user_defaults): command_args.replacement = True # Old value for --prediction-info='full data' maps to 'full' if command_args.prediction_info == 'full data': print "WARNING: 'full data' is a deprecated value. 
Use 'full' instead" command_args.prediction_info = FULL_FORMAT # Parses class, weight pairs for objective weight if command_args.objective_weights: objective_weights = ( u.read_objective_weights(command_args.objective_weights)) command_args.objective_weights_json = objective_weights command_args.multi_label_fields_list = [] if command_args.multi_label_fields is not None: multi_label_fields = command_args.multi_label_fields.strip() command_args.multi_label_fields_list = multi_label_fields.split( command_args.args_separator) # Sets shared_flag if --shared or --unshared has been used if '--shared' in flags or '--unshared' in flags: command_args.shared_flag = True else: command_args.shared_flag = False
def compute_output(api, args):
    """ Creates one or more models using the `training_set`
        or uses the ids of previously created BigML models to make
        predictions for the `test_set`.

    Pipeline (as visible here): validate option combinations, set up the
    session/log files, expand multi-label data, create or retrieve the
    source, then the dataset(s), optionally export to CSV, split into
    train/test, and partition by categories when --max-categories is used.
    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    dataset_fields = args.dataset_fields_

    # It is compulsory to have a description to publish either datasets or
    # models
    if (not args.description_ and
            (args.black_box or args.white_box or args.public_dataset)):
        sys.exit("You should provide a description to publish.")

    # When using --max-categories, it is compulsory to specify also the
    # objective_field
    if args.max_categories > 0 and args.objective_field is None:
        sys.exit("When --max-categories is used, you must also provide the"
                 " --objective field name or column number")

    # When using --new-fields, it is compulsory to specify also a dataset
    # id
    if args.new_fields and not args.dataset:
        sys.exit("To use --new-fields you must also provide a dataset id"
                 " to generate the new dataset from it.")

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if args.multi_label and source:
        # Re-sync the multi-label bookkeeping with the final source fields.
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        try:
            objective_id = fields.field_id(fields.objective_field)
        except ValueError, exc:
            sys.exit(exc)
        if pd.check_max_categories(fields.fields[objective_id]):
            # Build one dataset per category group when the objective has
            # more categories than the allowed maximum.
            distribution = pd.get_categories_distribution(dataset,
                                                          objective_id)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file,
                    path=path, log=log, other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     " --max-categories")
def transform_args(command_args, flags, api, user_defaults): """Transforms the formatted argument strings into structured arguments """ # Parses attributes in json format if provided command_args.json_args = {} json_attribute_options = { 'source': command_args.source_attributes, 'dataset': command_args.dataset_attributes, 'model': command_args.model_attributes, 'ensemble': command_args.ensemble_attributes, 'evaluation': command_args.evaluation_attributes, 'batch_prediction': command_args.batch_prediction_attributes } for resource_type, attributes_file in json_attribute_options.items(): if attributes_file is not None: command_args.json_args[resource_type] = u.read_json( attributes_file) else: command_args.json_args[resource_type] = {} # Parses dataset generators in json format if provided if command_args.new_fields: json_generators = u.read_json(command_args.new_fields) command_args.dataset_json_generators = json_generators else: command_args.dataset_json_generators = {} # Parses multi-dataset attributes in json such as field maps if command_args.multi_dataset_attributes: multi_dataset_json = u.read_json(command_args.multi_dataset_attributes) command_args.multi_dataset_json = multi_dataset_json else: command_args.multi_dataset_json = {} dataset_ids = None command_args.dataset_ids = [] # Parses dataset/id if provided. if command_args.datasets: dataset_ids = u.read_datasets(command_args.datasets) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids # Retrieve dataset/ids if provided. if command_args.dataset_tag: dataset_ids = dataset_ids.extend( u.list_ids(api.list_datasets, "tags__in=%s" % command_args.dataset_tag)) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids # Reads a json filter if provided. if command_args.json_filter: json_filter = u.read_json_filter(command_args.json_filter) command_args.json_filter = json_filter # Reads a lisp filter if provided. 
if command_args.lisp_filter: lisp_filter = u.read_lisp_filter(command_args.lisp_filter) command_args.lisp_filter = lisp_filter # Adds default tags unless that it is requested not to do so. if command_args.no_tag: command_args.tag.append('BigMLer') command_args.tag.append('BigMLer_%s' % NOW) # Checks combined votes method if (command_args.method and command_args.method != COMBINATION_LABEL and not (command_args.method in COMBINATION_WEIGHTS.keys())): command_args.method = 0 else: combiner_methods = dict([[value, key] for key, value in COMBINER_MAP.items()]) combiner_methods[COMBINATION_LABEL] = COMBINATION command_args.method = combiner_methods.get(command_args.method, 0) # Checks missing_strategy if (command_args.missing_strategy and not (command_args.missing_strategy in MISSING_STRATEGIES.keys())): command_args.missing_strategy = 0 else: command_args.missing_strategy = MISSING_STRATEGIES.get( command_args.missing_strategy, 0) # Adds replacement=True if creating ensemble and nothing is specified if (command_args.number_of_models > 1 and not command_args.replacement and not '--no-replacement' in flags and not 'replacement' in user_defaults and not '--no-randomize' in flags and not 'randomize' in user_defaults and not '--sample-rate' in flags and not 'sample_rate' in user_defaults): command_args.replacement = True # Old value for --prediction-info='full data' maps to 'full' if command_args.prediction_info == 'full data': print "WARNING: 'full data' is a deprecated value. Use 'full' instead" command_args.prediction_info = FULL_FORMAT # Parses class, weight pairs for objective weight if command_args.objective_weights: objective_weights = (u.read_objective_weights( command_args.objective_weights)) command_args.objective_weights_json = objective_weights command_args.multi_label_fields_list = [] if command_args.multi_label_fields is not None: multi_label_fields = command_args.multi_label_fields.strip() command_args.multi_label_fields_list = multi_label_fields.split(',')
def transform_args(command_args, flags, api, user_defaults): """Transforms the formatted argument strings into structured arguments """ # Parses attributes in json format if provided command_args.json_args = {} for resource_type in RESOURCE_TYPES: attributes_file = getattr(command_args, "%s_attributes" % resource_type, None) if attributes_file is not None: command_args.json_args[resource_type] = u.read_json( attributes_file) else: command_args.json_args[resource_type] = {} # Parses dataset generators in json format if provided if command_args.new_fields: json_generators = u.read_json(command_args.new_fields) command_args.dataset_json_generators = json_generators else: command_args.dataset_json_generators = {} # Parses multi-dataset attributes in json such as field maps if command_args.multi_dataset_attributes: multi_dataset_json = u.read_json(command_args.multi_dataset_attributes) command_args.multi_dataset_json = multi_dataset_json else: command_args.multi_dataset_json = {} dataset_ids = None command_args.dataset_ids = [] # Parses dataset/id if provided. if command_args.datasets: dataset_ids = u.read_datasets(command_args.datasets) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids test_dataset_ids = None command_args.test_dataset_ids = [] # Parses dataset/id if provided. if command_args.test_datasets: test_dataset_ids = u.read_datasets(command_args.test_datasets) command_args.test_dataset_ids = test_dataset_ids # Retrieve dataset/ids if provided. if command_args.dataset_tag: dataset_ids = dataset_ids.extend( u.list_ids(api.list_datasets, "tags__in=%s" % command_args.dataset_tag)) if len(dataset_ids) == 1: command_args.dataset = dataset_ids[0] command_args.dataset_ids = dataset_ids # Reads a json filter if provided. if command_args.json_filter: json_filter = u.read_json_filter(command_args.json_filter) command_args.json_filter = json_filter # Reads a lisp filter if provided. 
if command_args.lisp_filter: lisp_filter = u.read_lisp_filter(command_args.lisp_filter) command_args.lisp_filter = lisp_filter # Adds default tags unless that it is requested not to do so. if command_args.no_tag: command_args.tag.append('BigMLer') command_args.tag.append('BigMLer_%s' % NOW) # Checks combined votes method try: if (command_args.method and command_args.method != COMBINATION_LABEL and not (command_args.method in COMBINATION_WEIGHTS.keys())): command_args.method = 0 else: combiner_methods = dict([[value, key] for key, value in COMBINER_MAP.items()]) combiner_methods[COMBINATION_LABEL] = COMBINATION command_args.method = combiner_methods.get(command_args.method, 0) except AttributeError: pass # Checks missing_strategy try: if (command_args.missing_strategy and not (command_args.missing_strategy in MISSING_STRATEGIES.keys())): command_args.missing_strategy = 0 else: command_args.missing_strategy = MISSING_STRATEGIES.get( command_args.missing_strategy, 0) except AttributeError: pass # Adds replacement=True if creating ensemble and nothing is specified try: if (command_args.number_of_models > 1 and not command_args.replacement and not '--no-replacement' in flags and not 'replacement' in user_defaults and not '--no-randomize' in flags and not 'randomize' in user_defaults and not '--sample-rate' in flags and not 'sample_rate' in user_defaults): command_args.replacement = True except AttributeError: pass # Old value for --prediction-info='full data' maps to 'full' if command_args.prediction_info == 'full data': print "WARNING: 'full data' is a deprecated value. 
Use 'full' instead" command_args.prediction_info = FULL_FORMAT # Parses class, weight pairs for objective weight try: if command_args.objective_weights: objective_weights = (u.read_objective_weights( command_args.objective_weights)) command_args.objective_weights_json = objective_weights except AttributeError: pass try: command_args.multi_label_fields_list = [] if command_args.multi_label_fields is not None: multi_label_fields = command_args.multi_label_fields.strip() command_args.multi_label_fields_list = multi_label_fields.split( command_args.args_separator) except AttributeError: pass # Sets shared_flag if --shared or --unshared has been used if '--shared' in flags or '--unshared' in flags: command_args.shared_flag = True else: command_args.shared_flag = False command_args.has_models_ = ( (hasattr(command_args, 'model') and command_args.model) or (hasattr(command_args, 'models') and command_args.models) or (hasattr(command_args, 'ensemble') and command_args.ensemble) or (hasattr(command_args, 'ensembles') and command_args.ensembles) or (hasattr(command_args, 'cluster') and command_args.cluster) or (hasattr(command_args, 'clusters') and command_args.clusters) or (hasattr(command_args, 'model_tag') and command_args.model_tag) or (hasattr(command_args, 'anomaly') and command_args.anomaly) or (hasattr(command_args, 'anomalies') and command_args.anomalies) or (hasattr(command_args, 'ensemble_tag') and command_args.ensemble_tag) or (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag) or (hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag)) command_args.has_datasets_ = ( (hasattr(command_args, 'dataset') and command_args.dataset) or (hasattr(command_args, 'datasets') and command_args.datasets) or (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

    Pipeline (as visible here): set up session/log files, expand
    multi-label data, create or retrieve the source and dataset(s),
    optionally export to CSV, split into train/test, partition by
    categories (--max-categories), merge multiple datasets
    (--new-dataset) and generate transformed datasets (--new-fields,
    sampling, filters). Finally reports the generated files.
    """
    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)

    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if source is not None:
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        # Re-sync the multi-label bookkeeping with the final source fields.
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        # The last dataset in the list is the working dataset here.
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # Now we have a dataset, let's check if there's an objective_field
    # given by the user and update it in the fields structure
    args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            # Build one dataset per category group when the objective has
            # more categories than the allowed maximum.
            distribution = pd.get_categories_distribution(
                dataset, args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file,
                    path=path, log=log, other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     " --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    # NOTE(review): `and` binds tighter than `or`, so this reads as
    # new_fields OR sample_rate != 1 OR ((lisp_filter or json_filter) and
    # not has_source) -- confirm this grouping is intended.
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            # Fetch the dataset and build its fields structure if we only
            # have an id so far.
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)

    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
def compute_output(api, args):
    """ Creates one or more models using the `training_set` or uses the ids
        of previously created BigML models to make predictions for the
        `test_set`.

        Orchestrates the full source -> dataset -> model(s) -> prediction
        pipeline, honoring resume state and local JSON resource files when
        provided.  Side effects: creates remote BigML resources, writes
        session/log files under the output path, and mutates several
        ``args`` attributes (``objective_id_``, ``objective_field``,
        ``max_categories``, ...).
    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    #local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        # the expanded file always carries a header row
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if args.multi_label and source:
        # labels info may live in the source's user_metadata
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # Now we have a dataset, let's check if there's an objective_field
    # given by the user and update it in the fields structure
    args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file,
                    path=path, log=log, other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     " --max-categories")

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    # NOTE(review): by Python precedence the condition groups as
    # new_fields OR (sample_rate != 1 AND no_model) OR
    # ((lisp_filter OR json_filter) AND not has_source) -- confirm intended
    if args.new_fields or (args.sample_rate != 1 and args.no_model) or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.model_file:
        # model is retrieved from the contents of the given local JSON file
        model, csv_properties, fields = u.read_local_resource(
            args.model_file,
            csv_properties=csv_properties)
        models = [model]
        model_ids = [model['resource']]
        ensemble_ids = []
    elif args.ensemble_file:
        # model is retrieved from the contents of the given local JSON file
        ensemble, csv_properties, fields = u.read_local_resource(
            args.ensemble_file,
            csv_properties=csv_properties)
        model_ids = ensemble['object']['models'][:]
        ensemble_ids = [ensemble['resource']]
        models = model_ids[:]
        # first model is fetched with full fields for local use
        model = retrieve_resource(bigml.api.BigML(storage='./storage'),
                                  models[0],
                                  query_string=r.ALL_FIELDS_QS)
        models[0] = model
    else:
        # model is retrieved from the remote object
        models, model_ids, ensemble_ids, resume = pm.models_processing(
            datasets, models, model_ids,
            api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log, labels=labels,
            multi_label_data=multi_label_data, other_label=other_label)

    if models:
        model = models[0]
        single_model = len(models) == 1
    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id, api=api,
                                      verbosity=args.verbosity,
                                      session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if (isinstance(model, basestring) or
                bigml.api.get_status(model)['code'] != bigml.api.FINISHED):
            # choose the narrowest query string that still fetches what the
            # later steps need
            if not args.evaluate and not a.has_train(args) and \
                    not a.has_test(args) :
                query_string = MINIMUM_MODEL
            elif not args.test_header:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = "%s;%s" % (r.ALL_FIELDS_QS, r.FIELDS_QS)
            model = u.check_resource(model, api.get_model,
                                     query_string=query_string)
            models[0] = model
        if (args.black_box or args.white_box or
                (args.shared_flag and r.shared_changed(args.shared, model))):
            model_args = {}
            if args.shared_flag and r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model, model_args, args,
                                       api=api, path=path,
                                       session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and (a.has_test(args) or
                                        args.export_fields):
        # If more than one model, use the full field structure
        if (not single_model and not args.multi_label and
                belongs_to_ensemble(model)):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
                args.ensemble_ids_ = ensemble_ids
            else:
                ensemble_id = get_ensemble_id(model)
        fields = pm.get_model_fields(
            model, csv_properties, args, single_model=single_model,
            multi_label_data=multi_label_data)
        # Free memory after getting fields
        # local_ensemble = None
        gc.collect()

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(model, 'other_label', other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    # If predicting
    if (models and (a.has_test(args) or (test_dataset and args.remote))
            and not args.evaluate):
        models_per_label = 1
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [field[1] for field
                                           in multi_label_fields]
                args.multi_label_fields = ",".join(multi_label_field_names)
            test_set = ps.multi_label_expansion(
                args.test_set, args.test_header, args, path,
                labels=labels, session_file=session_file, input_flag=True)[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on or multi-label
        # or max-categories are used
        if (args.remote and not args.no_batch and not args.multi_label
                and not args.method in [THRESHOLD_CODE, COMBINATION]):
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume,
                    session_file=session_file, path=path, log=log)
                (test_source, resume,
                 csv_properties, test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args, resume,
                    session_file=session_file, path=path, log=log)
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)

            # test datasets have no objective field
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            if args.to_dataset and args.dataset_off:
                model = api.check_resource(model['resource'],
                                           query_string=r.ALL_FIELDS_QS)
                model_fields = Fields(model)
                objective_field_name = model_fields.field_name( \
                    model_fields.objective_field)
                if objective_field_name in test_fields.fields_by_name.keys():
                    args.prediction_name = "%s (predicted)" % \
                        objective_field_name
            batch_prediction_args = r.set_batch_prediction_args(
                args, fields=fields,
                dataset_fields=test_fields)

            remote_predict(model, test_dataset, batch_prediction_args, args,
                           api, resume, prediction_file=output,
                           session_file=session_file, path=path, log=log)
        else:
            models_per_label = args.number_of_models
            if (args.multi_label and len(ensemble_ids) > 0
                    and args.number_of_models == 1):
                # use case where ensembles are read from a file
                # NOTE(review): Python 2 integer division here -- confirm
                # len(models) is always a multiple of len(ensemble_ids)
                models_per_label = len(models) / len(ensemble_ids)
            predict(models, fields, args, api=api, log=log,
                    resume=resume, session_file=session_file, labels=labels,
                    models_per_label=models_per_label,
                    other_label=other_label,
                    multi_label_data=multi_label_data)

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if args.votes_files_:
        model_id = re.sub(r'.*(model_[a-f0-9]{24})__predictions\.csv$',
                          r'\1', args.votes_files_[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError, exception:
            sys.exit("Failed to get model %s: %s" % (model_id,
                                                     str(exception)))

        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        combine_votes(args.votes_files_,
                      local_model.to_prediction,
                      output, method=args.method)
def compute_output(api, args):
    """ Creates a dataset using the `training_set`.

        Dataset-subcommand pipeline: builds (or reads) a source, derives one
        or more datasets from it, and applies the optional transformations
        (split, max-categories, multi-dataset merge, new fields / sampling /
        filters).  Side effects: creates remote BigML resources, writes
        session/log files under the output path, and mutates several
        ``args`` attributes.
    """
    source = None
    dataset = None
    fields = None
    other_label = OTHER
    multi_label_data = None
    multi_label_fields = []
    datasets = None

    # variables from command-line options
    resume = args.resume_
    output = args.output
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = (None if args.labels is None else
              [label.strip() for label in
               args.labels.split(args.args_separator)])
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file)
        # the expanded file always carries a header row
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({'objective_field': args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file,
            csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if source is not None:
        # keep the source id in args for later steps
        args.source = bigml.api.get_source_id(source)
    if args.multi_label and source:
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
    if datasets:
        # NOTE(review): unlike the prediction pipeline, this picks the LAST
        # dataset in the list -- confirm this is intentional for the
        # dataset subcommand
        dataset = datasets[-1]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # Now we have a dataset, let's check if there's an objective_field
    # given by the user and update it in the fields structure
    args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset in a training and a test dataset
    # according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset, distribution, fields, args,
                    api, resume, session_file=session_file,
                    path=path, log=log, other_label=other_label)
        else:
            sys.exit("The provided objective field is not categorical nor "
                     "a full terms only text field. "
                     "Only these fields can be used with"
                     " --max-categories")

    # If any of the transformations is applied,
    # generate a new dataset from the given list of datasets
    if args.new_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    # NOTE(review): by Python precedence the condition groups as
    # new_fields OR (sample_rate != 1) OR
    # ((lisp_filter OR json_filter) AND not has_source) -- confirm intended
    if args.new_fields or args.sample_rate != 1 or \
            (args.lisp_filter or args.json_filter) and not has_source(args):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = u.check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({'objective_field': args.objective_name_,
                               'objective_field_present': True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field,
         labels,
         all_labels,
         multi_label_fields) = l.multi_label_sync(args.objective_field,
                                                  labels,
                                                  multi_label_data,
                                                  fields,
                                                  multi_label_fields)

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, 'max_categories',
                                           args.max_categories)
        other_label = get_metadata(dataset, 'other_label', other_label)
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    u.print_generated_files(path, log_file=session_file,
                            verbosity=args.verbosity)
    if args.reports:
        clear_reports(path)
        if args.upload:
            upload_reports(args.reports, path)
def transform_args(command_args, flags, api, user_defaults):
    """Transforms the formatted argument strings into structured arguments.

        Parses JSON attribute files, dataset id lists/tags, filters, vote
        combination methods, missing strategies and multi-label options,
        storing the structured results back on ``command_args``.

        :param command_args: parsed argparse namespace to be enriched in place
        :param flags: list of flag strings actually present on the command line
        :param api: BigML API connection (used to list datasets by tag)
        :param user_defaults: dict of option defaults read from the user config
    """
    # Parses attributes in json format if provided
    command_args.json_args = {}
    for resource_type in RESOURCE_TYPES:
        attributes_file = getattr(command_args,
                                  "%s_attributes" % resource_type, None)
        if attributes_file is not None:
            command_args.json_args[resource_type] = u.read_json(
                attributes_file)
        else:
            command_args.json_args[resource_type] = {}

    # Parses dataset generators in json format if provided
    if command_args.new_fields:
        json_generators = u.read_json(command_args.new_fields)
        command_args.dataset_json_generators = json_generators
    else:
        command_args.dataset_json_generators = {}

    # Parses multi-dataset attributes in json such as field maps
    if command_args.multi_dataset_attributes:
        multi_dataset_json = u.read_json(command_args.multi_dataset_attributes)
        command_args.multi_dataset_json = multi_dataset_json
    else:
        command_args.multi_dataset_json = {}

    dataset_ids = None
    command_args.dataset_ids = []
    # Parses dataset/id if provided.
    if command_args.datasets:
        dataset_ids = u.read_datasets(command_args.datasets)
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reading test dataset ids is delayed till the very moment of use to
    # ensure that the newly generated resources files can be used there too
    command_args.test_dataset_ids = []

    # Retrieve dataset/ids if provided.
    if command_args.dataset_tag:
        # Fix: ``list.extend`` returns None, so the previous
        # ``dataset_ids = dataset_ids.extend(...)`` lost the list (and
        # crashed with AttributeError when --datasets was not used).
        if dataset_ids is None:
            dataset_ids = []
        dataset_ids.extend(u.list_ids(
            api.list_datasets,
            "tags__in=%s" % command_args.dataset_tag))
        if len(dataset_ids) == 1:
            command_args.dataset = dataset_ids[0]
        command_args.dataset_ids = dataset_ids

    # Reads a json filter if provided.
    if command_args.json_filter:
        json_filter = u.read_json_filter(command_args.json_filter)
        command_args.json_filter = json_filter

    # Reads a lisp filter if provided.
    if command_args.lisp_filter:
        lisp_filter = u.read_lisp_filter(command_args.lisp_filter)
        command_args.lisp_filter = lisp_filter

    # Adds default tags unless that it is requested not to do so.
    if command_args.no_tag:
        command_args.tag.append('BigMLer')
        command_args.tag.append('BigMLer_%s' % NOW)

    # Checks combined votes method
    try:
        if (command_args.method and command_args.method != COMBINATION_LABEL
                and not (command_args.method in COMBINATION_WEIGHTS.keys())):
            command_args.method = 0
        else:
            combiner_methods = dict(
                [[value, key] for key, value in COMBINER_MAP.items()])
            combiner_methods[COMBINATION_LABEL] = COMBINATION
            command_args.method = combiner_methods.get(command_args.method, 0)
    except AttributeError:
        pass

    # Checks missing_strategy
    try:
        if (command_args.missing_strategy and
                not (command_args.missing_strategy in
                     MISSING_STRATEGIES.keys())):
            command_args.missing_strategy = 0
        else:
            command_args.missing_strategy = MISSING_STRATEGIES.get(
                command_args.missing_strategy, 0)
    except AttributeError:
        pass

    # Adds replacement=True if creating ensemble and nothing is specified
    try:
        if (command_args.number_of_models > 1 and
                not command_args.replacement and
                not '--no-replacement' in flags and
                not 'replacement' in user_defaults and
                not '--no-randomize' in flags and
                not 'randomize' in user_defaults and
                not '--sample-rate' in flags and
                not 'sample_rate' in user_defaults):
            command_args.replacement = True
    except AttributeError:
        pass
    try:
        # Old value for --prediction-info='full data' maps to 'full'
        if command_args.prediction_info == 'full data':
            print ("WARNING: 'full data' is a deprecated value. Use"
                   " 'full' instead")
            command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    # Parses class, weight pairs for objective weight
    try:
        if command_args.objective_weights:
            objective_weights = (
                u.read_objective_weights(command_args.objective_weights))
            command_args.objective_weights_json = objective_weights
    except AttributeError:
        pass

    try:
        command_args.multi_label_fields_list = []
        if command_args.multi_label_fields is not None:
            multi_label_fields = command_args.multi_label_fields.strip()
            command_args.multi_label_fields_list = multi_label_fields.split(
                command_args.args_separator)
    except AttributeError:
        pass

    # Sets shared_flag if --shared or --unshared has been used
    if '--shared' in flags or '--unshared' in flags:
        command_args.shared_flag = True
    else:
        command_args.shared_flag = False

    # Set remote on if scoring a training dataset in bigmler anomaly
    try:
        if command_args.score:
            command_args.remote = True
            if not "--prediction-info" in flags:
                command_args.prediction_info = FULL_FORMAT
    except AttributeError:
        pass

    command_args.has_models_ = (
        (hasattr(command_args, 'model') and command_args.model) or
        (hasattr(command_args, 'models') and command_args.models) or
        (hasattr(command_args, 'ensemble') and command_args.ensemble) or
        (hasattr(command_args, 'ensembles') and command_args.ensembles) or
        (hasattr(command_args, 'cluster') and command_args.cluster) or
        (hasattr(command_args, 'clusters') and command_args.clusters) or
        (hasattr(command_args, 'model_tag') and command_args.model_tag) or
        (hasattr(command_args, 'anomaly') and command_args.anomaly) or
        (hasattr(command_args, 'anomalies') and command_args.anomalies) or
        (hasattr(command_args, 'ensemble_tag') and
         command_args.ensemble_tag) or
        (hasattr(command_args, 'cluster_tag') and command_args.cluster_tag) or
        (hasattr(command_args, 'anomaly_tag') and command_args.anomaly_tag))

    command_args.has_datasets_ = (
        (hasattr(command_args, 'dataset') and command_args.dataset) or
        (hasattr(command_args, 'datasets') and command_args.datasets) or
        (hasattr(command_args, 'dataset_tag') and command_args.dataset_tag))

    command_args.has_test_datasets_ = (
        (hasattr(command_args, 'test_dataset') and
         command_args.test_dataset) or
        (hasattr(command_args, 'test_datasets') and
         command_args.test_datasets) or
        (hasattr(command_args, 'test_dataset_tag') and
         command_args.test_dataset_tag))
def create_kfold_datasets_file(args, api, command_obj, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line.

       :param args: parsed command arguments
       :param api: BigML API connection
       :param command_obj: command object used to build the partial datasets
       :param resume: whether to resume a previously interrupted run
       :return: (datasets_file, objective_name, resume) when a dataset id
                could be determined; implicitly None otherwise
    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    csv_properties = {}
    fields = None
    dataset = None
    datasets = []
    # Fix: dataset_id was left unbound when no dataset option was given,
    # raising UnboundLocalError at the ``if dataset_id:`` check below.
    dataset_id = None
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
            dataset_id = dataset['resource']
        else:
            # Fix: ``dataset`` is still None in this branch, so the former
            # ``dataset_id = dataset['resource']`` raised TypeError; use the
            # first id from the datasets file instead.
            datasets = u.read_datasets(args.datasets)
            dataset_id = datasets[0]
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]

    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in the
        # dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            # ``as`` syntax keeps Python 2.6+/3 compatibility
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      command_obj,
                                                      resume=resume)
        return datasets_file, objective_name, resume
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line.

       :param args: parsed command arguments
       :param api: BigML API connection
       :param common_options: options shared by the generated subcommands
       :param resume: whether to resume a previously interrupted run
       :return: (datasets_file, objective_name, resume) when a dataset id
                could be determined; implicitly None otherwise
    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    csv_properties = {}
    fields = None
    dataset = None
    datasets = []
    # Fix: dataset_id was left unbound when no dataset option was given,
    # raising UnboundLocalError at the ``if dataset_id:`` check below.
    dataset_id = None
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
            dataset_id = dataset['resource']
        else:
            # Fix: ``dataset`` is still None in this branch, so the former
            # ``dataset_id = dataset['resource']`` raised TypeError; use the
            # first id from the datasets file instead.
            datasets = u.read_datasets(args.datasets)
            dataset_id = datasets[0]
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]

    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in the
        # dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError as exc:
            # ``as`` syntax keeps Python 2.6+/3 compatibility
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args,
                                                        kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, objective_name, resume
def compute_output(api, args):
    """ Creates one or more models using the `training_set`
        or uses the ids of previously created BigML models
        to make predictions for the `test_set`.

        Drives the whole workflow: source -> dataset(s) ->
        model(s)/ensemble(s) -> (batch) predictions, threading the
        `resume`, `csv_properties` and `fields` state through each step.
    """
    source = None
    dataset = None
    model = None
    models = None
    fields = None
    other_label = OTHER
    ensemble_ids = []
    multi_label_data = None
    multi_label_fields = []
    # local_ensemble = None
    test_dataset = None
    datasets = None

    # variables from command-line options
    resume = args.resume_
    model_ids = args.model_ids_
    output = args.predictions
    # NOTE(review): `dataset_fields` is never read again in this function
    # (batch predictions use `test_fields` instead) -- confirm before removal.
    dataset_fields = args.dataset_fields_

    check_args_coherence(args)
    path = u.check_dir(output)
    session_file = "%s%s%s" % (path, os.sep, SESSIONS_LOG)
    csv_properties = {}
    # If logging is required set the file for logging
    log = None
    if args.log_file:
        u.check_dir(args.log_file)
        log = args.log_file
        # If --clear_logs the log files are cleared
        clear_log_files([log])

    # labels to be used in multi-label expansion
    labels = None if args.labels is None else [
        label.strip() for label in args.labels.split(args.args_separator)]
    if labels is not None:
        labels = sorted([label for label in labels])

    # multi_label file must be preprocessed to obtain a new extended file
    if args.multi_label and args.training_set is not None:
        (args.training_set, multi_label_data) = ps.multi_label_expansion(
            args.training_set, args.train_header, args, path,
            labels=labels, session_file=session_file
        )
        args.train_header = True
        args.objective_field = multi_label_data["objective_name"]
        all_labels = l.get_all_labels(multi_label_data)
        if not labels:
            labels = all_labels
    else:
        all_labels = labels
    if args.objective_field:
        csv_properties.update({"objective_field": args.objective_field})
    if args.source_file:
        # source is retrieved from the contents of the given local JSON file
        source, csv_properties, fields = u.read_local_resource(
            args.source_file, csv_properties=csv_properties)
    else:
        # source is retrieved from the remote object
        source, resume, csv_properties, fields = ps.source_processing(
            api, args, resume,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log,
        )
    if args.multi_label and source:
        # sync objective/label bookkeeping with the source's user_metadata
        multi_label_data = l.get_multi_label_data(source)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields,
            multi_label_fields
        )

    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file, csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
    if not datasets:
        # dataset is retrieved from the remote object
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source,
            api,
            args,
            resume,
            fields=fields,
            csv_properties=csv_properties,
            multi_label_data=multi_label_data,
            session_file=session_file,
            path=path,
            log=log,
        )
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # Now we have a dataset, let's check if there's an objective_field
    # given by the user and update it in the fields structure
    args.objective_id_ = get_objective_id(args, fields)

    # If test_split is used, split the dataset in a training and a test
    # dataset according to the given split
    if args.test_split > 0:
        dataset, test_dataset, resume = pd.split_processing(
            dataset, api, args, resume,
            multi_label_data=multi_label_data,
            session_file=session_file, path=path, log=log
        )
        datasets[0] = dataset

    # Check if the dataset has a categorical objective field and it
    # has a max_categories limit for categories
    if args.max_categories > 0 and len(datasets) == 1:
        if pd.check_max_categories(fields.fields[args.objective_id_]):
            distribution = pd.get_categories_distribution(dataset,
                                                          args.objective_id_)
            if distribution and len(distribution) > args.max_categories:
                # build one dataset per category, grouping the rest under
                # `other_label`
                categories = [element[0] for element in distribution]
                other_label = pd.create_other_label(categories, other_label)
                datasets, resume = pd.create_categories_datasets(
                    dataset,
                    distribution,
                    fields,
                    args,
                    api,
                    resume,
                    session_file=session_file,
                    path=path,
                    log=log,
                    other_label=other_label,
                )
        else:
            sys.exit(
                "The provided objective field is not categorical nor "
                "a full terms only text field. "
                "Only these fields can be used with"
                " --max-categories"
            )

    # If multi-dataset flag is on, generate a new dataset from the given
    # list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log
        )
        datasets = [dataset]

    # Check if the dataset has a generators file associated with it, and
    # generate a new dataset with the specified field structure. Also
    # if the --to-dataset flag is used to clone or sample the original dataset
    if (
        args.new_fields
        or (args.sample_rate != 1 and args.no_model)
        or (args.lisp_filter or args.json_filter) and not has_source(args)
    ):
        if fields is None:
            if isinstance(dataset, basestring):
                dataset = check_resource(dataset, api=api)
            fields = Fields(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
        args.objective_name_ = fields.field_name(args.objective_id_)
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log
        )
        datasets[0] = dataset
        # rebuild fields structure for new ids and fields
        csv_properties.update({"objective_field": args.objective_name_,
                               "objective_field_present": True})
        fields = pd.get_fields_structure(dataset, csv_properties)
        args.objective_id_ = get_objective_id(args, fields)
    if args.multi_label and dataset and multi_label_data is None:
        multi_label_data = l.get_multi_label_data(dataset)
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields,
            multi_label_fields
        )

    if dataset:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(dataset, "max_categories",
                                           args.max_categories)
        other_label = get_metadata(dataset, "other_label", other_label)

    if args.model_file:
        # model is retrieved from the contents of the given local JSON file
        model, csv_properties, fields = u.read_local_resource(
            args.model_file, csv_properties=csv_properties)
        models = [model]
        model_ids = [model["resource"]]
        ensemble_ids = []
    elif args.ensemble_file:
        # model is retrieved from the contents of the given local JSON file
        ensemble, csv_properties, fields = u.read_local_resource(
            args.ensemble_file, csv_properties=csv_properties)
        model_ids = ensemble["object"]["models"][:]
        ensemble_ids = [ensemble["resource"]]
        models = model_ids[:]
        # only the first model of the ensemble is fully downloaded here
        model = retrieve_resource(bigml.api.BigML(storage="./storage"),
                                  models[0], query_string=r.ALL_FIELDS_QS)
        models[0] = model
    else:
        # model is retrieved from the remote object
        models, model_ids, ensemble_ids, resume = pm.models_processing(
            datasets,
            models,
            model_ids,
            api,
            args,
            resume,
            fields=fields,
            session_file=session_file,
            path=path,
            log=log,
            labels=labels,
            multi_label_data=multi_label_data,
            other_label=other_label,
        )

    if models:
        model = models[0]
        single_model = len(models) == 1

    # If multi-label flag is set and no training_set was provided, label
    # info is extracted from the user_metadata. If models belong to an
    # ensemble, the ensemble must be retrieved to get the user_metadata.
    if model and args.multi_label and multi_label_data is None:
        if len(ensemble_ids) > 0 and isinstance(ensemble_ids[0], dict):
            resource = ensemble_ids[0]
        elif belongs_to_ensemble(model):
            ensemble_id = get_ensemble_id(model)
            resource = r.get_ensemble(ensemble_id, api=api,
                                      verbosity=args.verbosity,
                                      session_file=session_file)
        else:
            resource = model
        multi_label_data = l.get_multi_label_data(resource)

    # We update the model's public state if needed
    if model:
        if isinstance(model, basestring) or \
                bigml.api.get_status(model)["code"] != bigml.api.FINISHED:
            # choose how much of the model to download depending on use
            if not args.evaluate and not a.has_train(args):
                query_string = MINIMUM_MODEL
            elif not args.test_header:
                query_string = r.ALL_FIELDS_QS
            else:
                query_string = "%s;%s" % (r.ALL_FIELDS_QS, r.FIELDS_QS)
            model = u.check_resource(model, api.get_model,
                                     query_string=query_string)
            models[0] = model
        if args.black_box or args.white_box or \
                (args.shared_flag and r.shared_changed(args.shared, model)):
            model_args = {}
            if args.shared_flag and r.shared_changed(args.shared, model):
                model_args.update(shared=args.shared)
            if args.black_box or args.white_box:
                model_args.update(r.set_publish_model_args(args))
            if model_args:
                model = r.update_model(model, model_args, args, api=api,
                                       path=path, session_file=session_file)
                models[0] = model

    # We get the fields of the model if we haven't got
    # them yet and need them
    if model and not args.evaluate and args.test_set:
        # If more than one model, use the full field structure
        if not single_model and not args.multi_label and \
                belongs_to_ensemble(model):
            if len(ensemble_ids) > 0:
                ensemble_id = ensemble_ids[0]
            else:
                ensemble_id = get_ensemble_id(model)
        fields = pm.get_model_fields(
            model, csv_properties, args, single_model=single_model,
            multi_label_data=multi_label_data
        )
        # Free memory after getting fields
        # local_ensemble = None
        gc.collect()

    # Fills in all_labels from user_metadata
    if args.multi_label and not all_labels:
        (args.objective_field, labels, all_labels,
         multi_label_fields) = l.multi_label_sync(
            args.objective_field, labels, multi_label_data, fields,
            multi_label_fields
        )

    if model:
        # retrieves max_categories data, if any
        args.max_categories = get_metadata(model, "max_categories",
                                           args.max_categories)
        other_label = get_metadata(model, "other_label", other_label)

    # If predicting
    if models and (a.has_test(args) or (test_dataset and args.remote)) and \
            not args.evaluate:
        models_per_label = 1
        if test_dataset is None:
            test_dataset = get_test_dataset(args)

        if args.multi_label:
            # When prediction starts from existing models, the
            # multi_label_fields can be retrieved from the user_metadata
            # in the models
            if args.multi_label_fields is None and multi_label_fields:
                multi_label_field_names = [field[1] for field
                                           in multi_label_fields]
                args.multi_label_fields = ",".join(multi_label_field_names)
            # NOTE(review): the locals `test_set` and `test_set_header` are
            # never read afterwards -- presumably `args.test_set` /
            # `args.test_header` were intended; confirm against callers.
            test_set = ps.multi_label_expansion(
                args.test_set, args.test_header, args, path, labels=labels,
                session_file=session_file, input_flag=True
            )[0]
            test_set_header = True

        # Remote predictions: predictions are computed as batch predictions
        # in bigml.com except when --no-batch flag is set on or multi-label
        # or max-categories are used
        if (
            args.remote
            and not args.no_batch
            and not args.multi_label
            and not args.method in [THRESHOLD_CODE, COMBINATION]
        ):
            # create test source from file
            test_name = "%s - test" % args.name
            if args.test_source is None:
                test_properties = ps.test_source_processing(
                    api, args, resume, session_file=session_file, path=path,
                    log=log
                )
                (test_source, resume, csv_properties,
                 test_fields) = test_properties
            else:
                test_source_id = bigml.api.get_source_id(args.test_source)
                test_source = api.check_resource(test_source_id)
            if test_dataset is None:
                # create test dataset from test source
                dataset_args = r.set_basic_dataset_args(args, name=test_name)
                test_dataset, resume = pd.alternative_dataset_processing(
                    test_source, "test", dataset_args, api, args, resume,
                    session_file=session_file, path=path, log=log
                )
            else:
                test_dataset_id = bigml.api.get_dataset_id(test_dataset)
                test_dataset = api.check_resource(test_dataset_id)
            # batch predictions ignore the objective field of the test data
            csv_properties.update(objective_field=None,
                                  objective_field_present=False)
            test_fields = pd.get_fields_structure(test_dataset,
                                                  csv_properties)
            batch_prediction_args = r.set_batch_prediction_args(
                args, fields=fields, dataset_fields=test_fields)
            remote_predict(
                model,
                test_dataset,
                batch_prediction_args,
                args,
                api,
                resume,
                prediction_file=output,
                session_file=session_file,
                path=path,
                log=log,
            )
        else:
            models_per_label = args.number_of_models
            if args.multi_label and len(ensemble_ids) > 0 and \
                    args.number_of_models == 1:
                # use case where ensembles are read from a file
                # NOTE(review): "/" is integer division under Python 2 here;
                # under Python 3 it would be float division -- confirm the
                # intended semantics before porting.
                models_per_label = len(models) / len(ensemble_ids)
            predict(
                models,
                fields,
                args,
                api=api,
                log=log,
                resume=resume,
                session_file=session_file,
                labels=labels,
                models_per_label=models_per_label,
                other_label=other_label,
                multi_label_data=multi_label_data,
            )

    # When combine_votes flag is used, retrieve the predictions files saved
    # in the comma separated list of directories and combine them
    if args.votes_files_:
        # derive the model id from the first predictions file name
        model_id = re.sub(r".*(model_[a-f0-9]{24})__predictions\.csv$",
                          r"\1", args.votes_files_[0]).replace("_", "/")
        try:
            model = u.check_resource(model_id, api.get_model)
        except ValueError, exception:
            sys.exit("Failed to get model %s: %s" % (model_id,
                                                     str(exception)))
        local_model = Model(model)
        message = u.dated("Combining votes.\n")
        u.log_message(message, log_file=session_file,
                      console=args.verbosity)
        combine_votes(args.votes_files_, local_model.to_prediction,
                      output, method=args.method)
def get_dataset_info(api, args, resume, source, csv_properties,
                     fields, session_file, path, log):
    """Create or retrieve the dataset and test_dataset, together with the
       related information (updated resume flag, csv properties and fields).
    """
    dataset = None
    datasets = None
    test_dataset = None
    if args.dataset_file:
        # the dataset structure is read from a local JSON file
        local_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file, csv_properties=csv_properties)
        if args.datasets:
            datasets = u.read_datasets(args.datasets)
        else:
            dataset = local_dataset
            datasets = [local_dataset]
    if not datasets:
        # no local info was given: create or retrieve the remote dataset
        datasets, resume, csv_properties, fields = pd.dataset_processing(
            source, api, args, resume, fields=fields,
            csv_properties=csv_properties, session_file=session_file,
            path=path, log=log)
    if datasets:
        dataset = datasets[0]
        if args.to_csv is not None:
            resume = pd.export_dataset(dataset, api, args, resume,
                                       session_file=session_file, path=path)

    # When --test-split is used, divide the dataset into a training and a
    # test part according to the given split
    if args.test_split > 0:
        if args.subcommand == "time-series":
            # time-series splits use row ranges
            splitter = pd.split_range_processing
        else:
            # any other subcommand splits using a sample rate
            splitter = pd.split_processing
        dataset, test_dataset, resume = splitter(
            dataset, api, args, resume, session_file=session_file,
            path=path, log=log)
        datasets[0] = dataset

    # With --multi-dataset, build a new dataset from the list of datasets
    if args.multi_dataset:
        dataset, resume = pd.create_new_dataset(
            datasets, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets = [dataset]

    # With --new-fields, generate a new dataset using the field structure
    # described in the associated generators file
    if args.new_fields:
        dataset, resume = pd.create_new_dataset(
            dataset, api, args, resume, fields=fields,
            session_file=session_file, path=path, log=log)
        datasets[0] = dataset
    if fields and args.export_fields:
        fields.summary_csv(os.path.join(path, args.export_fields))

    return dataset, datasets, test_dataset, resume, csv_properties, fields