def i_create_all_ml_resources(step, tag=None, label_separator=None, number_of_labels=None, data=None, training_separator=None, test=None, output=None):
    """Create all multi-label resources from a train file and predict.

    Runs the bigmler multi-label command and records the output path and
    the expected number of test lines in ``world``.
    """
    required = (tag, label_separator, training_separator, number_of_labels,
                data, test, output)
    # every argument of this step is mandatory
    assert all(value is not None for value in required)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    world.number_of_models = int(number_of_labels)
    test = res_filename(test)
    try:
        command = "".join([
            "bigmler --multi-label --train ", res_filename(data),
            " --label-separator \"", label_separator,
            "\" --training-separator \"", training_separator,
            "\" --test ", test,
            " --store --output ", output,
            " --tag ", tag,
            " --max-batch-models 1"])
        retcode = check_call(check_debug(command), shell=True)
        # a negative return code means the child was killed by a signal
        assert retcode >= 0
        # the test file has a headers row that is not a prediction line
        world.test_lines = file_number_of_lines(test) - 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def shell_execute(command, output, test=None, options=None, data=None, test_split=None):
    """Execute a bigmler command in a shell and record expectations.

    Stores the output directory and the expected number of test lines in
    ``world``; ``test_split`` overrides the count with a fraction of the
    data file rows.
    """
    command = check_debug(command)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        # negative return codes mean the process died from a signal
        assert check_call(command, shell=True) >= 0
        if test is not None:
            world.test_lines = file_number_of_lines(test)
            header_kept = (options is not None and
                           options.find('--prediction-header') > -1)
            if not header_kept:
                # the test file has a headers row that must be ignored
                world.test_lines -= 1
        if test_split is not None:
            # expected lines come from splitting the training data
            data_lines = file_number_of_lines(data) - 1
            world.test_lines = int(data_lines * float(test_split))
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_evaluate_ensemble_with_dataset(step, ensemble_dir=None, dataset_dir=None, output=None):
    """Evaluate a stored ensemble with a stored test dataset."""
    assert not (ensemble_dir is None or dataset_dir is None or output is None)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    # the resource ids were written to files by previous steps
    ensemble_id = read_id_from_file(os.path.join(ensemble_dir, "ensembles"))
    dataset_id = read_id_from_file(os.path.join(dataset_dir, "dataset_test"))
    command = check_debug("bigmler --dataset " + dataset_id +
                          " --ensemble " + ensemble_id +
                          " --store" + " --output " + output +
                          " --evaluate")
    try:
        retcode = check_call(command, shell=True)
        assert retcode >= 0
        world.output = output
    except OSError as exc:
        assert False, str(exc)
def i_create_with_split_to_evaluate_ensemble(step, data=None, number_of_models=None, split=None, output=None):
    """Create an ensemble from a train/test split and evaluate it.

    Builds and runs the bigmler evaluation command and stores the output
    location in ``world``.
    """
    if data is None or split is None or output is None:
        assert False
    command = ("bigmler --evaluate --train " + res_filename(data) +
               " --test-split " + split +
               " --number-of-models " + number_of_models +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.directory = os.path.dirname(output)
            world.folders.append(world.directory)
            world.output = output
            assert True
    # Fix: surface the failure cause (previously ``except OSError as e:
    # assert False`` dropped it) and also catch CalledProcessError/IOError
    # like the sibling steps (check_call raises CalledProcessError on a
    # non-zero exit status).
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_predict_ml_from_model_tag_with_labels(step, labels=None, tag=None, test=None, output=None):
    """Create multi-label predictions from tagged models for given labels."""
    assert not (tag is None or labels is None or test is None
                or output is None)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    test = res_filename(test)
    try:
        pieces = ["bigmler --multi-label --model-tag ", tag,
                  " --labels ", labels,
                  " --test ", test,
                  " --store --output ", output,
                  " --max-batch-models 1"]
        retcode = check_call(check_debug("".join(pieces)), shell=True)
        assert retcode >= 0
        # the headers row of the test file is not a prediction line
        world.test_lines = file_number_of_lines(test) - 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_source_with_project(step, data=None, project=None, output_dir=None):
    """Create a source inside a brand-new project.

    If a project with the requested name already exists, the name is
    doubled until an unused one is found, then the source is created.
    """
    if data is None:
        assert False
    world.directory = output_dir
    world.folders.append(world.directory)
    # Check if the project already exists
    previous_projects = world.api.list_projects('name=%s' % project)
    while previous_projects['meta']['total_count'] > 0:
        # Fix: use print() calls so the module also parses under Python 3
        # (the original Python 2 print statements are a syntax error there).
        print("the project %s already exists, trying with:" % project)
        project += " " + project
        print(project)
        previous_projects = world.api.list_projects('name=%s' % project)
    try:
        command = (u"bigmler --train " + res_filename(data) +
                   u" --no-model --no-dataset --store --output-dir " +
                   output_dir + u" --project=\"" + project + "\"")
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = output_dir
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_predict_ml_from_model_tag_with_labels(step, labels=None, tag=None, test=None, output=None):
    """Predict multi-label values for selected labels using tagged models."""
    if tag is None or labels is None or test is None or output is None:
        assert False
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    test = res_filename(test)
    try:
        command = ("bigmler --multi-label --model-tag " + tag +
                   " --labels " + labels +
                   " --test " + test +
                   " --store --output " + output +
                   " --max-batch-models 1")
        if check_call(check_debug(command), shell=True) < 0:
            assert False
        # headers row in the test file is not a prediction row
        world.test_lines = file_number_of_lines(test)
        world.test_lines -= 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_output(step, output=None, language=None, resource_type='source', add_fields=False):
    """Reify a resource into source code in the given language.

    Runs ``bigmler reify`` for the resource stored in ``world`` under
    ``resource_type`` and stores the output directory in ``world.output``.
    """
    if output is None and language is None:
        assert False
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    resource_id = getattr(world, resource_type)['resource']
    try:
        command = (u"bigmler reify --id " + resource_id +
                   " --language " + language +
                   u" --store --output " + output)
        if add_fields:
            command += u' --add-fields'
        command = check_debug(command)
        if not PYTHON3:
            # Fix: the encoded result was previously discarded
            # (``command.encode(...)`` without assignment), so the raw
            # unicode command was executed under Python 2; assign it as
            # the sibling steps do.
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = world.directory
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_resources_and_ensembles_from_dataset(step, multi_label=None, number_of_models=None, test=None, output=None):
    """Create ensembles from the current dataset and predict on a test file."""
    assert test is not None and output is not None
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    # an empty flag string keeps the command single-label
    ml_flag = "" if multi_label is None else " --multi-label "
    test = res_filename(test)
    try:
        command = "".join([
            "bigmler ", ml_flag, "--dataset ", world.dataset['resource'],
            " --number-of-models ", str(number_of_models),
            " --test ", test,
            " --store --output ", output])
        retcode = check_call(check_debug(command), shell=True)
        assert retcode >= 0
        # skip the header row of the test file
        world.test_lines = file_number_of_lines(test) - 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_ml_resources(step, tag=None, label_separator=None, number_of_labels=None, data=None, training_separator=None, test=None, output=None):
    """Create all multi-label resources and predictions from a train file."""
    if (tag is None or label_separator is None or training_separator is None
            or number_of_labels is None or data is None or test is None
            or output is None):
        assert False
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    world.number_of_models = int(number_of_labels)
    test = res_filename(test)
    try:
        command = ("bigmler --multi-label --train %s --label-separator "
                   "\"%s\" --training-separator \"%s\" --test %s "
                   "--store --output %s --tag %s --max-batch-models 1" %
                   (res_filename(data), label_separator,
                    training_separator, test, output, tag))
        retcode = check_call(check_debug(command), shell=True)
        if retcode < 0:
            assert False
        # the headers row of the test file is not a prediction
        world.test_lines = file_number_of_lines(test)
        world.test_lines -= 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_source_with_project(step, data=None, project=None, output_dir=None):
    """Create a source in a new project, renaming until the name is free."""
    ok_(data is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    #Check if the project already exists
    previous_projects = world.api.list_projects('name=%s' % project)
    while previous_projects['meta']['total_count'] > 0:
        # Fix: use print() calls so the module also parses under Python 3
        # (the original Python 2 print statements are a syntax error there).
        print("the project %s already exists, trying with:" % project)
        project += " " + project
        print(project)
        previous_projects = world.api.list_projects('name=%s' % project)
    try:
        command = (u"bigmler --train " + res_filename(data) +
                   u" --no-model --no-dataset --store --output-dir " +
                   output_dir + u" --project=\"" + project + "\"")
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def shell_execute(command, output, test=None, options=None, test_rows=None, project=True):
    """Execute a bigmler command in a shell.

    Records the output directory in ``world`` and computes the expected
    number of prediction lines (``world.test_lines``) from either the
    test file, an explicit ``test_rows`` count, or the presence of the
    ``--prediction-header`` option in ``options``.
    """
    command = check_debug(command, project=project)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            if test is not None:
                world.test_lines = file_number_of_lines(test)
                if options is None or \
                        options.find('--prediction-header') == -1:
                    # test file has headers in it, so first line must be ignored
                    world.test_lines -= 1
            elif test_rows is not None:
                world.test_lines = test_rows
                if options is not None and \
                        options.find('--prediction-header') > -1:
                    # an explicit header adds one line to the output
                    world.test_lines += 1
            elif options is not None and \
                    options.find('--prediction-header') > -1:
                # no test info available: only the header line is added
                world.test_lines += 1
            world.output = output
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_resources_to_evaluate_with_model_and_map(step, data=None, fields_map=None, output=None):
    """Evaluate an existing model against a test file using a fields map."""
    if data is None or fields_map is None or output is None:
        assert False
    command = ("bigmler --evaluate --test " + res_filename(data) +
               " --model " + world.model["resource"] +
               " --output " + output +
               " --fields-map " + res_filename(fields_map))
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.directory = os.path.dirname(output)
            world.folders.append(world.directory)
            world.output = output
            assert True
    # Fix: surface the error message (previously ``except OSError as e:
    # assert False`` dropped it) and also catch CalledProcessError/IOError
    # like the sibling steps do.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_filtered_dataset_from_dataset(step, filter_exp=None, output_dir=None):
    """Create a new dataset by applying a lisp filter to the current one.

    The filter expression is first written to ``filter.lisp`` in the
    output directory and then passed to bigmler via ``--lisp-filter``.
    """
    if filter_exp is None or output_dir is None:
        assert False
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        if not sys.platform == 'win32':
            # quote the expression so the shell keeps it as one argument
            filter_exp = u'"%s"' % filter_exp.replace('"', '\\"')
        command = (u'echo ' + filter_exp + u' > ' + output_dir +
                   u"/filter.lisp")
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            # Fix: the return code of the echo command was previously
            # overwritten without being checked.
            assert False
        command = ((u"bigmler --dataset %s" % world.dataset['resource']) +
                   u" --no-model --store --output-dir " + output_dir +
                   u" --lisp-filter " + output_dir + "/filter.lisp")
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = output_dir
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def shell_execute(command, output, test=None, options=None, data=None, test_split=None):
    """Run a bigmler command in a shell and record prediction expectations."""
    command = check_debug(command)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        assert check_call(command, shell=True) >= 0
        if test is not None:
            # the prediction file has headers, so one line is discounted
            world.test_lines = file_number_of_lines(test) - 1
            # remember whether a header row is expected in the output
            world.prediction_header = (options is not None and
                                       options.find('--prediction-header') > -1)
        if test_split is not None:
            data_lines = file_number_of_lines(data) - 1
            world.test_lines = int(data_lines * float(test_split))
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_mc_resources_from_source(step, max_categories=None, objective=None, test=None, output=None):
    """Create max-categories resources from the current source and predict."""
    # Fix: also require ``objective`` — it is interpolated into the command
    # below but the original guard omitted it (the sibling
    # i_create_all_mc_resources does check it).
    if (max_categories is None or test is None or output is None
            or objective is None):
        assert False
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    test = res_filename(test)
    try:
        command = ("bigmler --source " + world.source['resource'] +
                   " --max-categories " + max_categories +
                   " --objective " + objective +
                   " --test " + test +
                   " --store --output " + output)
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            # test file has headers in it, so first line must be ignored
            world.test_lines = file_number_of_lines(test) - 1
            world.output = output
            assert True
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_proportional_to_evaluate(step, test=None):
    """Evaluate the current model using the proportional missing strategy."""
    if test is None:
        assert False
    test = res_filename(test)
    try:
        output_dir = world.directory + "_eval/"
        output = output_dir + os.path.basename(world.output)
        command = ("bigmler --evaluate --model " +
                   world.model["resource"] +
                   " --test " + test +
                   " --missing-strategy proportional --output " + output)
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.directory = output_dir
            world.folders.append(world.directory)
            world.output = output
            assert True
    # Fix: report the failure cause (the original swallowed it) and match
    # the exception tuple used by the other steps.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_resources_and_ensembles_from_dataset(step, multi_label=None, number_of_models=None, test=None, output=None):
    """Build ensembles from the stored dataset and create predictions."""
    if test is None or output is None:
        assert False
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    # empty string keeps the command single-label
    flag = "" if multi_label is None else " --multi-label "
    test = res_filename(test)
    try:
        command = check_debug(
            "bigmler " + flag + "--dataset " + world.dataset["resource"] +
            " --number-of-models " + str(number_of_models) +
            " --test " + test + " --store --output " + output)
        if check_call(command, shell=True) < 0:
            assert False
        # discount the headers row of the test file
        world.test_lines = file_number_of_lines(test) - 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_mc_resources(step, data, max_categories=None, objective=None, test=None, output=None):
    """Create all resources with --max-categories and predict on a test file."""
    assert not (max_categories is None or test is None or output is None
                or objective is None)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    test = res_filename(test)
    try:
        pieces = ["bigmler --train ", res_filename(data),
                  " --max-categories ", max_categories,
                  " --objective ", objective,
                  " --test ", test,
                  " --store --output ", output]
        retcode = check_call(check_debug("".join(pieces)), shell=True)
        assert retcode >= 0
        # ignore the headers line of the test file
        world.test_lines = file_number_of_lines(test) - 1
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_ml_source(step, label_separator=None, number_of_labels=None, data=None, training_separator=None, multi_label_fields=None, objective=None, output_dir=None):
    """Create a multi-label source declaring multi-label fields and objective."""
    required = (label_separator, training_separator, number_of_labels,
                data, multi_label_fields, output_dir, objective)
    assert all(value is not None for value in required)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = "".join([
            "bigmler --multi-label --train ", res_filename(data),
            " --label-separator \"", label_separator,
            "\" --training-separator \"", training_separator,
            "\" --multi-label-fields ", multi_label_fields,
            " --objective ", objective,
            " --store --output-dir ", output_dir,
            " --no-dataset --no-model --max-batch-models 1"])
        retcode = check_call(check_debug(command), shell=True)
        assert retcode >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def shell_execute(command, output, test=None, options=None, test_rows=None, project=True):
    """Execute a bigmler command in a shell.

    Computes the expected number of projection lines
    (``world.test_lines``) from the test file, an explicit ``test_rows``
    count, or the presence of the ``--projection-header`` option.
    """
    command = check_debug(command, project=project)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            if test is not None:
                world.test_lines = file_number_of_lines(test)
                if options is None or \
                        options.find('--projection-header') == -1:
                    # test file has headers in it, so first line must be ignored
                    world.test_lines -= 1
            elif test_rows is not None:
                world.test_lines = test_rows
                if options is not None and \
                        options.find('--projection-header') > -1:
                    # an explicit header adds one line to the output
                    world.test_lines += 1
            elif options is not None and \
                    options.find('--projection-header') > -1:
                # no test info available: only the header line is added
                world.test_lines += 1
            world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_import_fields(step, summary=None):
    """Import field structure changes from a summary file into the dataset."""
    ok_(summary is not None)
    try:
        cmd = check_debug("bigmler --dataset " + world.dataset['resource'] +
                          " --import-fields " + res_filename(summary) +
                          " --output-dir " + world.directory +
                          " --no-model")
        ok_(check_call(cmd, shell=True) >= 0)
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_with_split_to_evaluate(step, data=None, split=None, output=None):
    """Create resources from a train file with a test split and evaluate."""
    if data is None or split is None or output is None:
        assert False
    command = ("bigmler --evaluate --train " + res_filename(data) +
               " --test-split " + split +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        ok_(retcode >= 0)
        world.directory = os.path.dirname(output)
        world.folders.append(world.directory)
        world.output = output
    # Fix: keep the failure cause (previously lost by a bare
    # ``assert False``) and catch the same exceptions as sibling steps.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def given_i_create_bigml_resources_using_source_to_evaluate(step, output=None):
    """Evaluate resources created from the current source."""
    if output is None:
        assert False
    command = ("bigmler --evaluate --source " +
               world.source['resource'] +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        ok_(retcode >= 0)
        world.directory = os.path.dirname(output)
        world.folders.append(world.directory)
        world.output = output
    # Fix: keep the failure cause (previously dropped) and catch the same
    # exceptions as the sibling steps.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_export_the_dataset(step, filename):
    """Export the current dataset to a local CSV file."""
    assert filename is not None
    try:
        command = check_debug("bigmler --dataset " +
                              world.dataset['resource'] +
                              " --to-csv " + filename +
                              " --output-dir " + world.directory +
                              " --no-model")
        assert check_call(command, shell=True) >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_export_the_dataset(step, filename):
    """Download the dataset stored in ``world`` as a CSV file."""
    if filename is None:
        assert False
    try:
        pieces = ["bigmler --dataset ", world.dataset['resource'],
                  " --to-csv ", filename,
                  " --output-dir ", world.directory,
                  " --no-model"]
        retcode = check_call(check_debug("".join(pieces)), shell=True)
        if retcode < 0:
            assert False
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_ml_evaluations_from_source(step, output=None):
    """Create multi-label evaluations starting from the current source."""
    assert output is not None
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    try:
        command = ("bigmler --multi-label --source " +
                   world.source['resource'] +
                   " --evaluate --store --output " + output)
        assert check_call(check_debug(command), shell=True) >= 0
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def given_i_create_bigml_resources_using_dataset_to_evaluate(step, output=None):
    """Evaluate resources created from the current dataset."""
    if output is None:
        assert False
    command = ("bigmler --evaluate --dataset " +
               world.dataset['resource'] +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        ok_(retcode >= 0)
        world.directory = os.path.dirname(output)
        world.folders.append(world.directory)
        world.output = output
    # Fix: report the cause instead of a bare ``assert False`` and catch
    # the exception tuple used by the sibling steps.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_resources_to_evaluate_with_model_and_map(
        step, data=None, fields_map=None, output=None):
    """Evaluate an existing model on a test file using a fields map."""
    if data is None or fields_map is None or output is None:
        assert False
    command = ("bigmler --evaluate --test " + res_filename(data) +
               " --model " + world.model['resource'] +
               " --output " + output +
               " --fields-map " + res_filename(fields_map))
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        ok_(retcode >= 0)
        world.directory = os.path.dirname(output)
        world.folders.append(world.directory)
        world.output = output
    # Fix: surface the error message (previously swallowed by
    # ``except OSError as e: assert False``) and catch the usual tuple.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def given_i_create_bigml_resources_using_source_to_evaluate(step, output=None):
    """Evaluate resources created from the current source."""
    if output is None:
        assert False
    command = ("bigmler --evaluate --source " + world.source["resource"] +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.directory = os.path.dirname(output)
            world.folders.append(world.directory)
            world.output = output
            assert True
    # Fix: ``except OSError as e: assert False`` dropped the error
    # message; keep it and catch CalledProcessError/IOError too.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_all_ml_resources_for_evaluation(step, tag=None, label_separator=None, number_of_labels=None, data=None, training_separator=None, output=None):
    """Create all multi-label resources from a train file for evaluation."""
    required = (tag, label_separator, training_separator, number_of_labels,
                data, output)
    assert all(item is not None for item in required)
    world.directory = os.path.dirname(output)
    world.folders.append(world.directory)
    world.number_of_models = int(number_of_labels)
    try:
        command = "".join([
            "bigmler --multi-label --train ", res_filename(data),
            " --label-separator \"", label_separator,
            "\" --training-separator \"", training_separator,
            "\" --store --output ", output,
            " --tag ", tag,
            " --max-batch-models 1"])
        assert check_call(check_debug(command), shell=True) >= 0
        world.output = output
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_update_dataset_new_properties(step, json_file=None):
    """Update the current dataset with attributes from a JSON file."""
    assert json_file is not None
    json_file = res_filename(json_file)
    try:
        command = check_debug("bigmler --dataset " +
                              world.dataset['resource'] +
                              " --no-model --store --output-dir " +
                              world.output +
                              " --dataset-attributes " + json_file)
        assert check_call(command, shell=True) >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_update_dataset_new_properties(step, json_file=None):
    """Apply new dataset attributes read from a JSON file."""
    if json_file is None:
        assert False
    json_file = res_filename(json_file)
    try:
        parts = ["bigmler --dataset ", world.dataset['resource'],
                 " --no-model --store --output-dir ", world.output,
                 " --dataset-attributes ", json_file]
        retcode = check_call(check_debug("".join(parts)), shell=True)
        if retcode < 0:
            assert False
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_ml_source(step, label_separator=None, number_of_labels=None, data=None, training_separator=None, multi_label_fields=None, objective=None, output_dir=None):
    """Create a multi-label source, declaring multi-label fields."""
    if (label_separator is None or training_separator is None
            or number_of_labels is None or data is None
            or multi_label_fields is None or output_dir is None
            or objective is None):
        assert False
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = ("bigmler --multi-label --train " + res_filename(data) +
                   ' --label-separator "' + label_separator +
                   '" --training-separator "' + training_separator +
                   '" --multi-label-fields ' + multi_label_fields +
                   " --objective " + objective +
                   " --store --output-dir " + output_dir +
                   " --no-dataset --no-model --max-batch-models 1")
        if check_call(check_debug(command), shell=True) < 0:
            assert False
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_sample(step, options=None, output_dir=None):
    """Create a sample from the current dataset with extra options."""
    assert options is not None and output_dir is not None
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = check_debug(u"bigmler sample --dataset " +
                              world.dataset['resource'] +
                              u" --store --output-dir " + output_dir +
                              u" " + options)
        # the shell command is executed with the system encoding
        assert check_call(command.encode(SYSTEM_ENCODING), shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_update_project(step, params=None, values=None):
    """Update the current project with pairs of params and values."""
    ok_(params is not None and values is not None)
    try:
        command = (u"bigmler project --project-id " +
                   world.project['resource'] +
                   u" --store --output-dir " + world.directory)
        # append one --param value pair per requested update
        for index, param in enumerate(params):
            command += u" --%s %s" % (param, values[index])
        if not PYTHON3:
            command = command.encode(BIGML_SYS_ENCODING)
        assert check_call(check_debug(command), shell=True) >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_filter_field_from_dataset(step, field=None, output_dir=None):
    """Build a new dataset that excludes one field from the current one."""
    assert field is not None and output_dir is not None
    try:
        empty_json = res_filename('data/empty.json')
        command = check_debug("bigmler --dataset " +
                              world.dataset['resource'] +
                              " --no-model --store --output-dir " +
                              output_dir +
                              " --dataset-fields=\"-" + field + "\"" +
                              " --new-fields " + empty_json)
        assert check_call(command, shell=True) >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset_new_fields(step, json_file=None, model_fields=None):
    """Create a dataset adding new fields from a JSON generators file."""
    assert json_file is not None and model_fields is not None
    json_file = res_filename(json_file)
    try:
        parts = ["bigmler --dataset ", world.dataset['resource'],
                 " --model-fields \"", model_fields, "\" --store",
                 " --output-dir ", world.output,
                 " --new-fields ", json_file]
        assert check_call(check_debug("".join(parts)), shell=True) >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset(step, data=None, output_dir=None):
    """Create a dataset from a training file without building a model."""
    assert data is not None and output_dir is not None
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = check_debug(u"bigmler --train " + res_filename(data) +
                              u" --no-model --store --output-dir " +
                              output_dir)
        assert check_call(command.encode(SYSTEM_ENCODING), shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_filter_field_from_dataset(step, field=None, output_dir=None):
    """Create a dataset where the given field has been filtered out."""
    if field is None or output_dir is None:
        assert False
    try:
        empty_json = res_filename('data/empty.json')
        command = ("bigmler --dataset " + world.dataset['resource'] +
                   " --no-model --store --output-dir " + output_dir +
                   " --dataset-fields=\"-" + field + "\"" +
                   " --new-fields " + empty_json)
        retcode = check_call(check_debug(command), shell=True)
        if retcode < 0:
            assert False
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset_new_fields(step, json_file=None, model_fields=None):
    """Add generated fields to the dataset, keeping selected model fields."""
    if json_file is None or model_fields is None:
        assert False
    json_file = res_filename(json_file)
    try:
        command = ("bigmler --dataset " + world.dataset['resource'] +
                   " --model-fields \"" + model_fields + "\" --store" +
                   " --output-dir " + world.output +
                   " --new-fields " + json_file)
        if check_call(check_debug(command), shell=True) < 0:
            assert False
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_project(step, project=None, output_dir=None):
    """Create a new project with the given name."""
    ok_(project is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler project --name \"" + project +
                   u"\" --store --output-dir " + output_dir)
        if not PYTHON3:
            command = command.encode(BIGML_SYS_ENCODING)
        # a negative return code means the child was killed by a signal
        assert check_call(check_debug(command), shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset(step, data=None, output_dir=None):
    """Build only a dataset (no model) from the given training data."""
    if data is None or output_dir is None:
        assert False
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler --train " + res_filename(data) +
                   u" --no-model --store --output-dir " + output_dir)
        command = check_debug(command)
        retcode = check_call(command.encode(SYSTEM_ENCODING), shell=True)
        if retcode < 0:
            assert False
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_anomaly_with_params_from_dataset(step, params=None, output_dir=None):
    """Create an anomaly detector from the dataset with extra parameters."""
    ok_(output_dir is not None and params is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = check_debug(u"bigmler anomaly --dataset " +
                              world.dataset['resource'] +
                              u" --store --output-dir " + output_dir +
                              u" " + params)
        assert check_call(command, shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def given_i_create_bigml_resources_using_dataset_to_evaluate_with_model(
        step, output=None):
    """Evaluate an existing model using the current dataset."""
    if output is None:
        assert False
    command = ("bigmler --evaluate --dataset " +
               world.dataset['resource'] +
               " --model " + world.model['resource'] +
               " --output " + output)
    command = check_debug(command)
    try:
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.directory = os.path.dirname(output)
            world.folders.append(world.directory)
            world.output = output
            assert True
    # Fix: keep the error message (was silently dropped) and catch the
    # same exception tuple as the other steps.
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_project_in_org(step, name=None, output_dir=None, organization=None):
    """Create a project inside an organization."""
    ok_(name is not None and organization is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler project --name \"" + name +
                   "\" --organization " + organization +
                   u" --output-dir " + output_dir)
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        assert check_call(check_debug(command), shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_source_with_org_project(step, data=None, output_dir=None):
    """Create a source inside the organization project stored in world."""
    ok_(data is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler --train " + res_filename(data) +
                   u" --no-model --no-dataset --store --output-dir " +
                   output_dir +
                   u" --org-project " + world.project["resource"])
        if not PYTHON3:
            command = command.encode(BIGML_SYS_ENCODING)
        if check_call(check_debug(command), shell=True) < 0:
            assert False
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset_from_source(step, output_dir=None):
    """Create a dataset from the current source, skipping the model."""
    assert output_dir is not None
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = check_debug(
            (u"bigmler --source %s" % world.source['resource']) +
            u" --no-model --store --output-dir " + output_dir)
        assert check_call(command, shell=True) >= 0
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_project_in_org(step, name=None, output_dir=None, organization=None):
    """Step: create a project inside an organization via bigmler project.

    NOTE(review): this is a verbatim duplicate of an earlier definition of
    the same name in this file; at import time this later definition
    shadows the earlier one. Consider removing one of the two.
    """
    ok_(name is not None and organization is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler project --name \"" + name +
                   "\" --organization " + organization +
                   u" --output-dir " + output_dir)
        # Python 2 needs the shell command as encoded bytes.
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        command = check_debug(command)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_evaluate_ensemble_with_dataset( \ step, ensemble_dir=None, dataset_dir=None, output=None): if ensemble_dir is None or dataset_dir is None or output is None: assert False world.directory = os.path.dirname(output) world.folders.append(world.directory) ensemble_id = read_id_from_file(os.path.join(ensemble_dir, "ensembles")) dataset_id = read_id_from_file(os.path.join(dataset_dir, "dataset_test")) command = ("bigmler --dataset " + dataset_id + " --ensemble " + ensemble_id + " --store" + " --output " + output + " --evaluate") command = check_debug(command) try: retcode = check_call(command, shell=True) ok_(retcode >= 0) world.output = output except OSError as exc: assert False, str(exc)
def i_update_project(step, params=None, values=None):
    """Step: update attributes of the current project via bigmler project.

    Each params[i] becomes a --<param> <value> pair appended to the
    command line.
    """
    ok_(params is not None and values is not None)
    try:
        command = (u"bigmler project --project-id " +
                   world.project['resource'] +
                   u" --store --output-dir " + world.directory)
        for index, param in enumerate(params):
            command += u" --%s %s" % (param, values[index])
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(check_debug(command), shell=True)
        assert retcode >= 0
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_dataset_from_source(step, output_dir=None):
    """Step: build a dataset from the current source, skipping the model.

    NOTE(review): duplicate of an earlier definition of the same name in
    this file; this later one shadows it.
    """
    if output_dir is None:
        assert False
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = ((u"bigmler --source %s" % world.source['resource']) +
                   u" --no-model --store --output-dir " + output_dir)
        command = check_debug(command)
        if not PYTHON3:
            # Guard the encoding like every other step in this file does:
            # the previous version encoded unconditionally, which diverged
            # from the Python-3 path used elsewhere (str command for
            # check_call(shell=True)).
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(command, shell=True)
        if retcode < 0:
            assert False
        else:
            world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_source_with_org_project(step, data=None, output_dir=None):
    """Step: create a source inside an organization's project."""
    ok_(data is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler --train " + res_filename(data) +
                   u" --no-model --no-dataset --store --output-dir " +
                   output_dir +
                   u" --org-project " + world.project["resource"])
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(check_debug(command), shell=True)
        ok_(retcode >= 0)
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_project(step, project=None, output_dir=None):
    """Step: create a named project and store its files in output_dir."""
    ok_(project is not None)
    world.directory = output_dir
    world.folders.append(world.directory)
    try:
        command = (u"bigmler project --name \"" + project +
                   u"\" --store --output-dir " + output_dir)
        if not PYTHON3:
            command = command.encode(SYSTEM_ENCODING)
        retcode = check_call(check_debug(command), shell=True)
        ok_(retcode >= 0)
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)
def i_create_multi_dataset(step, output_dir):
    """Step: merge the datasets listed in the last run's dataset file.

    Reads the datasets file written by a previous --store run under
    world.directory, then repoints world.directory at output_dir.
    """
    if output_dir is None:
        assert False
    world.folders.append(output_dir)
    datasets_file = world.directory + os.sep + "dataset"
    try:
        command = ("bigmler --datasets " + datasets_file +
                   " --multi-dataset --no-model --store --output-dir " +
                   output_dir)
        retcode = check_call(check_debug(command), shell=True)
        if retcode < 0:
            assert False
        world.directory = output_dir
        world.output = output_dir
    except (OSError, CalledProcessError, IOError) as exc:
        assert False, str(exc)