def main(args=sys.argv[1:]):
    """Main process

    """
    if args:
        if args[0].lower() not in SUBCOMMANDS:
            new_args = ["main"]
            new_args.extend(args)
        else:
            new_args = args
        # checks if the old --delete syntax is used
        new_args = check_delete_option(new_args)
        # note: this version decodes argv unconditionally, so it assumes
        # Python 2 byte-string arguments (later versions guard on PYTHON3)
        new_args = [arg.decode(SYSTEM_ENCODING) for arg in new_args]
        if new_args[0] == "main":
            main_dispatcher(args=new_args)
        elif new_args[0] == "analyze":
            analyze_dispatcher(args=new_args)
        elif new_args[0] == "cluster":
            cluster_dispatcher(args=new_args)
        elif new_args[0] == "anomaly":
            anomaly_dispatcher(args=new_args)
        elif new_args[0] == "delete":
            delete_dispatcher(args=new_args)
    else:
        sys.exit("BigMLer used with no arguments. Check:\nbigmler --help\n\nor"
                 "\n\nbigmler analyze --help\n\n"
                 "\n\nbigmler cluster --help\n\n"
                 "\n\nbigmler anomaly --help\n\n"
                 "\n\nbigmler delete --help\n\n"
                 " for a list of options")

def main(args=sys.argv[1:]):
    """Main process

    """
    if args:
        if args[0].lower() not in SUBCOMMANDS:
            new_args = ["main"]
            new_args.extend(args)
        else:
            new_args = args
        # checks if the old --delete syntax is used
        new_args = check_delete_option(new_args)
        if not PYTHON3:
            new_args = [arg.decode(SYSTEM_ENCODING) for arg in new_args]
        if new_args[0] == "main":
            main_dispatcher(args=new_args)
        elif new_args[0] == "analyze":
            analyze_dispatcher(args=new_args)
        elif new_args[0] == "cluster":
            cluster_dispatcher(args=new_args)
        elif new_args[0] == "anomaly":
            anomaly_dispatcher(args=new_args)
        elif new_args[0] == "sample":
            sample_dispatcher(args=new_args)
        elif new_args[0] == "report":
            report_dispatcher(args=new_args)
        elif new_args[0] == "reify":
            reify_dispatcher(args=new_args)
        elif new_args[0] == "execute":
            execute_dispatcher(args=new_args)
        elif new_args[0] == "delete":
            delete_dispatcher(args=new_args)
        elif new_args[0] == "project":
            project_dispatcher(args=new_args)
        elif new_args[0] == "association":
            association_dispatcher(args=new_args)
        elif new_args[0] == "logistic-regression":
            logistic_regression_dispatcher(args=new_args)
        elif new_args[0] == "whizzml":
            whizzml_dispatcher(args=new_args)
    else:
        sys.exit(
            "BigMLer used with no arguments. Check:\nbigmler --help\n\nor"
            "\n\nbigmler sample --help\n\n"
            "\n\nbigmler analyze --help\n\n"
            "\n\nbigmler cluster --help\n\n"
            "\n\nbigmler anomaly --help\n\n"
            "\n\nbigmler report --help\n\n"
            "\n\nbigmler reify --help\n\n"
            "\n\nbigmler project --help\n\n"
            "\n\nbigmler association --help\n\n"
            "\n\nbigmler logistic-regression --help\n\n"
            "\n\nbigmler execute --help\n\n"
            "\n\nbigmler whizzml --help\n\n"
            "\n\nbigmler delete --help\n\n"
            " for a list of options")

def main(args=sys.argv[1:]):
    """Main process

    """
    if args:
        if args[0].lower() not in SUBCOMMANDS:
            new_args = ["main"]
            new_args.extend(args)
        else:
            new_args = args
        if new_args[0] == "main":
            main_dispatcher(args=new_args)
        elif new_args[0] == "analyze":
            analyze_dispatcher(args=new_args)
    else:
        sys.exit("BigMLer used with no arguments. Check:\nbigmler --help\n\nor"
                 "\n\nbigmler analyze --help\n\n for a list of options")

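# A minimal sketch (not BigMLer code) of the normalization rule shared by
# the main() variants above: if the first CLI token is not a recognized
# subcommand, the whole argument list is treated as options for the
# implicit "main" subcommand. The subcommands tuple below is a stand-in;
# the real SUBCOMMANDS list lives elsewhere in the package.
def _normalize_args(args, subcommands=("main", "analyze", "cluster")):
    if args and args[0].lower() not in subcommands:
        return ["main"] + list(args)
    return list(args)

# expected behavior:
assert _normalize_args(["--train", "data.csv"]) == \
    ["main", "--train", "data.csv"]
assert _normalize_args(["analyze", "--cross-validation"]) == \
    ["analyze", "--cross-validation"]
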
def create_prediction_dataset(base_path, folder, args, resume):
    """Creates batch prediction datasets and a multidataset with the
       prediction results for the best scoring model in the folder set
       by the argument

    """
    args.output_dir = os.path.join(base_path, "%s_pred" % folder)
    folder = os.path.join(base_path, folder)
    model_type = "ensembles" if hasattr(args, "number_of_models") and \
        args.number_of_models > 1 else "models"
    global subcommand_list
    # creating the predictions CSV file
    command = COMMANDS["prediction"] % (base_path, model_type, folder,
                                        model_type, folder)
    command_args = command.split()
    if resume:
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    return resume

def create_prediction_dataset(base_path, folder, args, resume):
    """Creates batch prediction datasets and a multidataset with the
       prediction results for the best scoring model in the folder set
       by the argument

    """
    args.output_dir = os.path.join(base_path, "%s_pred" % folder)
    output_dir = args.output_dir
    folder = os.path.join(base_path, folder)
    model_type = "ensembles" if hasattr(args, "number_of_models") and \
        args.number_of_models > 1 else "models"
    global subcommand_list
    # creating the predictions CSV file
    command = COMMANDS["prediction"] % (base_path, model_type, folder,
                                        model_type, folder)
    command_args = command.split()
    if resume:
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    return resume

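# Hedged sketch (not BigMLer code) of the resume protocol that
# create_prediction_dataset and the helpers below all follow: a global
# subcommand_list holds the commands logged by a previous, interrupted
# run. Each step pops the next expected command; if it matches the one
# about to run, the step is skipped as already done. On a mismatch,
# resume mode is abandoned and the command is logged and dispatched
# normally; if the popped command was the last one logged, that step
# itself is resumed via "main --resume". The plain string comparison
# below is an assumption: the real different_command helper may
# normalize commands before comparing.
def _resume_step(command, subcommand_list, log_and_run, resume):
    """Replays one logged step and returns the updated resume flag."""
    if resume:
        next_command = subcommand_list.pop()
        if next_command.strip() != command.strip():  # assumed comparison
            resume = False
            log_and_run(command)            # diverged: execute for real
        elif not subcommand_list:
            log_and_run("main --resume")    # last logged step: resume it
            resume = False
        # otherwise the step already finished in the interrupted run: skip
    else:
        log_and_run(command)
    return resume
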
def create_kfold_datasets(dataset, args, selecting_file_list,
                          common_options, resume=False):
    """Calling the bigmler procedure to create the k-fold datasets

    """
    args.output_dir = os.path.normpath(os.path.join(args.output_dir, "test"))
    output_dir = args.output_dir
    global subcommand_list
    # creating the selecting datasets
    for index in range(0, len(selecting_file_list)):
        command = COMMANDS["selection"] % (
            dataset, selecting_file_list[index], output_dir)
        command_args = command.split()
        common_options_list = u.get_options_list(args, common_options,
                                                 prioritary=command_args)
        command_args.extend(common_options_list)
        command = rebuild_command(command_args)
        if resume:
            next_command = subcommand_list.pop()
            if different_command(next_command, command):
                resume = False
                u.sys_log_message(command, log_file=subcommand_file)
                main_dispatcher(args=command_args)
            elif not subcommand_list:
                main_dispatcher(args=['main', '--resume'])
                resume = False
        else:
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
    datasets_file = os.path.normpath(os.path.join(output_dir, "dataset_gen"))
    return datasets_file, resume

def create_kfold_datasets(dataset, args, selecting_file_list,
                          command_obj, resume=False):
    """Calling the bigmler procedure to create the k-fold datasets

    """
    args.output_dir = os.path.normpath(os.path.join(args.output_dir, "test"))
    output_dir = args.output_dir
    global subcommand_list
    # creating the selecting datasets
    for index in range(0, len(selecting_file_list)):
        command = COMMANDS["selection"] % (
            dataset, selecting_file_list[index], output_dir)
        command_args = command.split()
        command_obj.propagate(command_args)
        command = rebuild_command(command_args)
        if resume:
            next_command = subcommand_list.pop()
            if different_command(next_command, command):
                resume = False
                u.sys_log_message(command, log_file=subcommand_file)
                main_dispatcher(args=command_args)
            elif not subcommand_list:
                main_dispatcher(args=['main', '--resume'])
                resume = False
        else:
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
    datasets_file = os.path.normpath(os.path.join(output_dir, "dataset_gen"))
    return datasets_file, resume

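# In the variant above, command_obj.propagate(command_args) replaces the
# older u.get_options_list(...) + extend pattern for carrying the user's
# common options into each generated subcommand. A plausible minimal
# sketch, assuming propagate appends every stored option whose flag is
# not already present and not excluded; the real Command class in
# BigMLer may behave differently.
class _CommandSketch(object):
    def __init__(self, stored_options):
        # e.g. {"--max-parallel-models": "3", "--verbosity": "0"}
        self.stored_options = stored_options

    def propagate(self, command_args, exclude=None):
        exclude = exclude or []
        for flag, value in self.stored_options.items():
            if flag not in command_args and flag not in exclude:
                command_args.extend([flag, value])
        return command_args
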
def create_kfold_evaluations(datasets_file, args, command_obj,
                             resume=False, counter=0):
    """ Create k-fold cross-validation from a datasets file

    """
    global subcommand_list
    output_dir = os.path.normpath(
        u.check_dir(
            os.path.join(u"%s%s" % (args.output_dir, counter),
                         u"evaluation.json")))
    model_fields = args.model_fields
    name_suffix = "_subset_%s" % counter
    name_max_length = NAME_MAX_LENGTH - len(name_suffix)
    name = "%s%s" % (args.name[0:name_max_length], name_suffix)
    dataset_id = u.read_datasets(datasets_file)[0]
    model_dataset = os.path.normpath(
        os.path.join(u.check_dir(datasets_file),
                     dataset_id.replace("/", "_")))
    command = COMMANDS["create_cv"] % (datasets_file, output_dir, name,
                                       model_dataset)
    command_args = command.split()
    if model_fields:
        command_args.append("--model-fields")
        command_args.append(model_fields)
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    command_obj.propagate(
        command_args, exclude=["--dataset", "--datasets", "--dataset-file"])
    command = rebuild_command(command_args)
    if resume:
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(
        os.path.join(output_dir, "evaluation.json"))
    try:
        with open(evaluation_file) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")

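# Worked example of the naming arithmetic in create_kfold_evaluations,
# using a hypothetical 32-character limit (the real NAME_MAX_LENGTH is
# defined elsewhere): the k-fold suffix is reserved first and the
# user-supplied name is truncated to the remaining room, so the combined
# name never exceeds the limit.
NAME_MAX_LENGTH_EXAMPLE = 32                    # hypothetical value
counter = 3
name_suffix = "_subset_%s" % counter            # "_subset_3", 9 chars
name_max_length = NAME_MAX_LENGTH_EXAMPLE - len(name_suffix)     # 23
name = "%s%s" % ("a very long user-provided name"[0:name_max_length],
                 name_suffix)
assert name == "a very long user-provid_subset_3"
assert len(name) <= NAME_MAX_LENGTH_EXAMPLE
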
def create_candidates_evaluations(datasets_file, args, command_obj,
                                  resume=False,
                                  random_candidates=DEFAULT_MIN_CANDIDATES):
    """ Create random candidates ensembles evaluations

    """
    global subcommand_list
    output_dir = os.path.normpath(
        u.check_dir(
            os.path.join(u"%s%s" % (args.output_dir, random_candidates),
                         "evaluation.json")))
    command = COMMANDS["random_candidates"] % (datasets_file,
                                               random_candidates, output_dir)
    command_args = command.split()
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    command_obj.propagate(
        command_args, exclude=["--dataset", "--datasets", "--dataset-file"])
    command = rebuild_command(command_args)
    if resume:
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(
        os.path.join(output_dir, "evaluation.json"))
    try:
        with open(evaluation_file, u.open_mode("r")) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")

def create_node_th_evaluations(datasets_file, args, common_options,
                               resume=False,
                               node_threshold=DEFAULT_MIN_NODES):
    """ Create node_threshold evaluations

    """
    global subcommand_list
    output_dir = os.path.normpath(
        u.check_dir(
            os.path.join(u"%s%s" % (args.output_dir, node_threshold),
                         "evaluation.json")))
    command = COMMANDS["node_threshold"] % (datasets_file, node_threshold,
                                            output_dir)
    command_args = command.split()
    common_options_list = u.get_options_list(args, common_options,
                                             prioritary=command_args)
    command_args.extend(common_options_list)
    command_args.append("--objective")
    command_args.append(args.objective_field)
    command_args = add_model_options(command_args, args)
    command = rebuild_command(command_args)
    if resume:
        next_command = subcommand_list.pop()
        if different_command(next_command, command):
            resume = False
            u.sys_log_message(command, log_file=subcommand_file)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.sys_log_message(command, log_file=subcommand_file)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.normpath(
        os.path.join(output_dir, "evaluation.json"))
    try:
        with open(evaluation_file, u.open_mode("r")) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")

def create_kfold_evaluations(datasets_file, args, common_options,
                             resume=False, counter=0):
    """ Create k-fold cross-validation from a datasets file

    """
    global subcommand_list
    output_dir = u.check_dir(os.path.join("%s%s" % (args.output_dir, counter),
                                          "evaluation.json"))
    model_fields = args.model_fields
    name_suffix = "_subset_%s" % counter
    name_max_length = NAME_MAX_LENGTH - len(name_suffix)
    name = "%s%s" % (args.name[0:name_max_length], name_suffix)
    command = COMMANDS["create_cv"] % (datasets_file, output_dir, name)
    command_args = command.split()
    if model_fields:
        command_args.append("--model-fields")
        command_args.append(model_fields)
    common_options_list = u.get_options_list(args, common_options,
                                             prioritary=command_args)
    command_args.extend(common_options_list)
    command = " ".join(command_args)
    if resume:
        next_command = subcommand_list.pop().strip()
        if next_command != command:
            resume = False
            u.log_message("%s\n" % command, log_file=subcommand_file,
                          console=False)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.log_message("%s\n" % command, log_file=subcommand_file,
                      console=False)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.join(output_dir, "evaluation.json")
    try:
        with open(evaluation_file) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")

def create_node_th_evaluations(datasets_file, args, common_options,
                               resume=False,
                               node_threshold=DEFAULT_MIN_NODES):
    """ Create node_threshold evaluations

    """
    global subcommand_list
    output_dir = u.check_dir(
        os.path.join("%s%s" % (args.output_dir, node_threshold),
                     "evaluation.json"))
    command = COMMANDS["node_threshold"] % (datasets_file, node_threshold,
                                            output_dir)
    command_args = command.split()
    common_options_list = u.get_options_list(args, common_options,
                                             prioritary=command_args)
    command_args.extend(common_options_list)
    command = " ".join(command_args)
    if resume:
        next_command = subcommand_list.pop().strip()
        if next_command != command:
            resume = False
            u.log_message("%s\n" % command, log_file=subcommand_file,
                          console=False)
            main_dispatcher(args=command_args)
        elif not subcommand_list:
            main_dispatcher(args=['main', '--resume'])
            resume = False
    else:
        u.log_message("%s\n" % command, log_file=subcommand_file,
                      console=False)
        main_dispatcher(args=command_args)
    evaluation_file = os.path.join(output_dir, "evaluation.json")
    try:
        with open(evaluation_file) as evaluation_handler:
            evaluation = json.loads(evaluation_handler.read())
        return evaluation, resume
    except (ValueError, IOError):
        sys.exit("Failed to retrieve evaluation.")

def main(args=sys.argv[1:]):
    """Main process

    """
    if args:
        if args[0].lower() not in SUBCOMMANDS:
            new_args = ["main"]
            new_args.extend(args)
        else:
            new_args = args
        # checks if the old --delete syntax is used
        new_args = check_delete_option(new_args)
        if not PYTHON3:
            new_args = [arg.decode(SYSTEM_ENCODING) for arg in new_args]
        if new_args[0] == "main":
            main_dispatcher(args=new_args)
        elif new_args[0] == "analyze":
            analyze_dispatcher(args=new_args)
        elif new_args[0] == "cluster":
            cluster_dispatcher(args=new_args)
        elif new_args[0] == "anomaly":
            anomaly_dispatcher(args=new_args)
        elif new_args[0] == "sample":
            sample_dispatcher(args=new_args)
        elif new_args[0] == "report":
            report_dispatcher(args=new_args)
        elif new_args[0] == "reify":
            reify_dispatcher(args=new_args)
        elif new_args[0] == "execute":
            execute_dispatcher(args=new_args)
        elif new_args[0] == "delete":
            delete_dispatcher(args=new_args)
        elif new_args[0] == "project":
            project_dispatcher(args=new_args)
        elif new_args[0] == "association":
            association_dispatcher(args=new_args)
        elif new_args[0] == "logistic-regression":
            logistic_regression_dispatcher(args=new_args)
        elif new_args[0] == "topic-model":
            if no_stemmer:
                sys.exit("To use the bigmler topic-model command you need the"
                         " Pystemmer library. Please, install it and"
                         " retry your command.")
            topic_model_dispatcher(args=new_args)
        elif new_args[0] == "time-series":
            time_series_dispatcher(args=new_args)
        elif new_args[0] == "deepnet":
            deepnet_dispatcher(args=new_args)
        elif new_args[0] == "whizzml":
            whizzml_dispatcher(args=new_args)
        elif new_args[0] == "export":
            export_dispatcher(args=new_args)
        elif new_args[0] == "retrain":
            retrain_dispatcher(args=new_args)
    else:
        sys.exit("BigMLer used with no arguments. Check:\nbigmler --help\n\nor"
                 "\n\nbigmler sample --help\n\n"
                 "\n\nbigmler analyze --help\n\n"
                 "\n\nbigmler cluster --help\n\n"
                 "\n\nbigmler anomaly --help\n\n"
                 "\n\nbigmler report --help\n\n"
                 "\n\nbigmler reify --help\n\n"
                 "\n\nbigmler project --help\n\n"
                 "\n\nbigmler association --help\n\n"
                 "\n\nbigmler logistic-regression --help\n\n"
                 "\n\nbigmler topic-model --help\n\n"
                 "\n\nbigmler time-series --help\n\n"
                 "\n\nbigmler deepnet --help\n\n"
                 "\n\nbigmler execute --help\n\n"
                 "\n\nbigmler whizzml --help\n\n"
                 "\n\nbigmler export --help\n\n"
                 "\n\nbigmler retrain --help\n\n"
                 "\n\nbigmler delete --help\n\n"
                 " for a list of options")

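# "checks if the old --delete syntax is used": before "delete" became a
# first-class subcommand, deletions were requested with a --delete flag
# on the main command. A hedged sketch of what check_delete_option
# presumably does (the real helper lives in BigMLer's command utilities
# and may differ):
def _check_delete_option_sketch(args):
    if args and args[0] == "main" and "--delete" in args:
        return ["delete"] + [arg for arg in args[1:] if arg != "--delete"]
    return args

# e.g. ["main", "--delete", "--older-than", "2"] would become
#      ["delete", "--older-than", "2"]
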
def create_kfold_datasets(dataset, args, selecting_file_list, objective,
                          kfold_field, common_options, resume=False):
    """Calling the bigmler procedure to create the k-fold datasets

    """
    args.output_dir = os.path.join(args.output_dir, "test")
    output_dir = args.output_dir
    k = args.k_folds
    global subcommand_list
    # creating the selecting datasets
    for index in range(0, len(selecting_file_list)):
        command = COMMANDS["selection"] % (
            dataset, selecting_file_list[index], output_dir)
        command_args = command.split()
        common_options_list = u.get_options_list(args, common_options,
                                                 prioritary=command_args)
        command_args.extend(common_options_list)
        command = " ".join(command_args)
        if resume:
            next_command = subcommand_list.pop().strip()
            if next_command != command:
                resume = False
                u.log_message("%s\n" % command, log_file=subcommand_file,
                              console=False)
                main_dispatcher(args=command_args)
            elif not subcommand_list:
                main_dispatcher(args=['main', '--resume'])
                resume = False
        else:
            u.log_message("%s\n" % command, log_file=subcommand_file,
                          console=False)
            main_dispatcher(args=command_args)
    # updating the datasets to set the objective field
    datasets_file = os.path.join(output_dir, "dataset_gen")
    with open(datasets_file) as datasets_handler:
        for line in datasets_handler:
            dataset_id = line.strip()
            # note: index keeps its final value from the selection loop
            # above, so every updated dataset shares the same name suffix
            command = COMMANDS["objective"] % (dataset_id,
                                               "dataset_%s" % index,
                                               output_dir)
            command_args = command.split()
            command_args.append("--objective")
            command_args.append(objective)
            common_options_list = u.get_options_list(args, common_options,
                                                     prioritary=command_args)
            command_args.extend(common_options_list)
            command = " ".join(command_args)
            if resume:
                next_command = subcommand_list.pop().strip()
                if next_command != command:
                    resume = False
                    u.log_message("%s\n" % command, log_file=subcommand_file,
                                  console=False)
                    main_dispatcher(args=command_args)
                elif not subcommand_list:
                    main_dispatcher(args=['main', '--resume'])
                    resume = False
            else:
                u.log_message("%s\n" % command, log_file=subcommand_file,
                              console=False)
                main_dispatcher(args=command_args)
    return datasets_file, resume