def describe_studie(args):
  client = AdvisorClient()
  study = client.get_study_by_name(args.study_name)

  # Print the study
  table = PrettyTable()
  table.field_names = [
      "Id", "Name", "Algorithm", "Status", "Created", "Updated"
  ]
  table.add_row([
      study.id, study.name, study.algorithm, study.status, study.created_time,
      study.updated_time
  ])
  print(table)

  # Print the study configuration
  """
  table = PrettyTable()
  table.field_names = ["Configuration"]
  table.add_row([study.study_configuration])
  print(table)
  """
  pprint.pprint(json.loads(study.study_configuration))

  # Print the related trials
  study_trials = client.list_trials(args.study_name)
  if len(study_trials) > 0:
    print_trials_as_table(study_trials)
def main(_):
  curr_dir = os.path.abspath(os.path.dirname(__file__))
  train_path = os.path.join(
      curr_dir, '../../preprocess/output/train_shuffle_' +
      str(FLAGS.thresh_hold) + '.csv')
  valid_path = os.path.join(
      curr_dir,
      '../../preprocess/output/valid_raw_' + str(FLAGS.thresh_hold) + '.csv')
  warm_start_dir = os.path.join(curr_dir, 'logdir')
  train_input_fn = get_input_fn(train_path, 4096, 1, 10000)
  valid_input_fn = get_input_fn(valid_path, 4096, 1, 10000)

  client = AdvisorClient()

  alpha_list = [0.00001, 0.0001, 0.001, 0.01]
  params = {'alpha': 0, 'feature_dim': get_feature_dim(FLAGS.thresh_hold)}

  # ws = tf.estimator.WarmStartSettings(
  #     ckpt_to_initialize_from=warm_start_dir,
  #     vars_to_warm_start=".*input_layer.*")
  ws = None

  for alpha in alpha_list:
    params['alpha'] = alpha
    model_dir = os.path.join(curr_dir, 'logdir_s' + str(alpha))
    model = MyModel(model_dir=model_dir, params=params, warm_start_from=ws)
    for _ in range(3):
      model.train(input_fn=train_input_fn)
      model.evaluate(input_fn=valid_input_fn)
def main(unused_argv):
  client = AdvisorClient()

  with open(FLAGS.config, "r") as cfg:
    config = json.load(cfg)

  assert "study_name" in config, "The config needs a 'study_name' parameter (str)"
  study_name = config.pop("study_name")
  study_config = config.pop("hyper-parameter-config")
  algorithm = config.pop("algorithm")
  fixed_parameter_config = config.pop("fixed-parameter-config")
  exp_dir = config.pop("study_dir")

  if not os.path.isdir(os.path.join(exp_dir, study_name)):
    os.mkdir(os.path.join(exp_dir, study_name))

  assert algorithm in ALGORITHMS, "algorithm {} is not supported. Select one of {}".format(
      algorithm, ", ".join(ALGORITHMS))

  study = client.get_or_create_study(
      study_name=study_name,
      study_configuration=study_config,
      algorithm=algorithm)
  print(study)

  max_trials = study_config["maxTrials"]
  for i in range(max_trials):
    trial = client.get_suggestions(study.name, 1)[0]
    parameter_value_dict = json.loads(trial.parameter_values)
    parameter_value_dict["skip_gru_units"] = int(
        parameter_value_dict["skip_gru_units"])
    parameter_value_dict["gru_units"] = int(parameter_value_dict["gru_units"])
    parameter_value_dict["cnn_batch_normalization"] = int(
        parameter_value_dict["cnn_batch_normalization"])
    merged_config = {**fixed_parameter_config, **parameter_value_dict}

    # Find the next unused trial directory
    while os.path.isdir(os.path.join(exp_dir, study_name, "trial_{}".format(i))):
      i += 1
    os.mkdir(os.path.join(exp_dir, study_name, "trial_{}".format(i)))
    merged_config["model_dir"] = os.path.join(exp_dir, study_name,
                                              "trial_{}".format(i))

    manager = Manager(**merged_config)
    ret = manager.run()
    corr_score = ret["corr"]
    rmse_score = ret["rmse"]
    metric = rmse_score + (1 - corr_score)

    trial = client.complete_trial_with_one_metric(trial, metric)
    print(trial)

  best_trial = client.get_best_trial(study.name)
  print("Best trial: {}".format(best_trial))
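# A minimal sketch of the JSON config file the script above expects, assuming the
# key names it pops ("study_name", "hyper-parameter-config", "algorithm",
# "fixed-parameter-config", "study_dir") and the study-configuration format used
# in the other examples. The concrete ranges, the fixed parameters ("epochs",
# "batch_size") and the file name are illustrative only; the real Manager
# arguments are not shown in the original.
import json

example_config = {
    "study_name": "lstnet_search",
    "study_dir": "/tmp/studies",
    "algorithm": "BayesianOptimization",
    "hyper-parameter-config": {
        "goal": "MINIMIZE",
        "maxTrials": 10,
        "maxParallelTrials": 1,
        "params": [
            {
                "parameterName": "gru_units",
                "type": "INTEGER",
                "minValue": 32,
                "maxValue": 256,
                "scallingType": "LINEAR"
            },
            {
                "parameterName": "skip_gru_units",
                "type": "INTEGER",
                "minValue": 4,
                "maxValue": 32,
                "scallingType": "LINEAR"
            },
            {
                "parameterName": "cnn_batch_normalization",
                "type": "INTEGER",
                "minValue": 0,
                "maxValue": 1,
                "scallingType": "LINEAR"
            },
        ]
    },
    "fixed-parameter-config": {
        "epochs": 20,
        "batch_size": 64
    }
}

# Write the config so it can be passed to the script via --config.
with open("lstnet_search.json", "w") as config_file:
  json.dump(example_config, config_file, indent=2)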
def main(train_function):
  client = AdvisorClient()

  # Get or create the study
  study_configuration = {
      "goal": "MINIMIZE",
      "randomInitTrials": 3,
      "maxTrials": 5,
      "maxParallelTrials": 1,
      "params": [
          {
              "parameterName": "min_child_weight",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 20,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "max_depth",
              "type": "INTEGER",
              "minValue": 5,
              "maxValue": 15,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "gamma",
              "type": "DOUBLE",
              "minValue": 0.0,
              "maxValue": 10.0,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "alpha",
              "type": "DOUBLE",
              "minValue": 0.0,
              "maxValue": 10.0,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
      ]
  }
  study = client.create_study("Study", study_configuration,
                              "BayesianOptimization")
  #study = client.get_study_by_id(21)

  # Get suggested trials
  trials = client.get_suggestions(study.id, 3)

  # Generate parameters
  parameter_value_dicts = []
  for trial in trials:
    parameter_value_dict = json.loads(trial.parameter_values)
    print("The suggested parameters: {}".format(parameter_value_dict))
    parameter_value_dicts.append(parameter_value_dict)

  # Run training
  metrics = []
  for i in range(len(trials)):
    metric = train_function(**parameter_value_dicts[i])
    metrics.append(metric)

  # Complete the trials
  for i in range(len(trials)):
    trial = trials[i]
    client.complete_trial_with_one_metric(trial, metrics[i])

  is_done = client.is_study_done(study.id)
  best_trial = client.get_best_trial(study.id)
  print("The study: {}, best trial: {}".format(study, best_trial))
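# A minimal sketch of a train_function matching the search space above
# (min_child_weight, max_depth, gamma, alpha), assuming XGBoost and a synthetic
# regression dataset. The real training code is not shown in the original, so the
# dataset, objective and number of boosting rounds here are placeholders.
import xgboost as xgb
from sklearn.datasets import make_regression


def train_function(min_child_weight, max_depth, gamma, alpha):
  X, y = make_regression(n_samples=1000, n_features=20, random_state=0)
  dtrain = xgb.DMatrix(X, label=y)
  params = {
      "objective": "reg:squarederror",
      "min_child_weight": int(min_child_weight),
      "max_depth": int(max_depth),
      "gamma": gamma,
      "alpha": alpha,
  }
  # 3-fold cross-validation; return the final test RMSE as the metric to
  # minimize, matching goal=MINIMIZE in the study configuration.
  cv_results = xgb.cv(params, dtrain, num_boost_round=50, nfold=3, seed=0)
  return float(cv_results["test-rmse-mean"].iloc[-1])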
#!/usr/bin/env python

import json

from advisor_client.model import Study
from advisor_client.model import Trial
from advisor_client.model import TrialMetric
from advisor_client.client import AdvisorClient

client = AdvisorClient()

# Create Study
name = "Study"
maxTrials = 20
study_configuration = {
    #"goal": "MAXIMIZE",
    "goal": "MINIMIZE",
    "maxTrials": maxTrials,
    "maxParallelTrials": 1,
    "randomInitTrials": 10,
    "params": [{
        "parameterName": "x",
        "type": "DOUBLE",
        "minValue": -10,
        "maxValue": 10,
        "scallingType": "LINEAR"
    }]
}
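# The snippet above ends after the study configuration. A minimal sketch of how
# the script might continue, assuming the same AdvisorClient API used in the
# other examples (create_study, get_suggestions, complete_trial_with_one_metric,
# get_best_trial). The objective function and the algorithm choice are not shown
# in the original; the quadratic below is only a placeholder borrowed from the
# "y = x * x - 3 * x + 2" example elsewhere in this corpus.
def objective(x):
  # Hypothetical objective to minimize; replace with the real function.
  return x * x - 3 * x + 2


study = client.create_study(name, study_configuration, "BayesianOptimization")

for _ in range(maxTrials):
  trial = client.get_suggestions(study.name, 1)[0]
  parameter_value_dict = json.loads(trial.parameter_values)
  metric = objective(parameter_value_dict["x"])
  client.complete_trial_with_one_metric(trial, metric)

best_trial = client.get_best_trial(study.name)
print("Best trial: {}".format(best_trial))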
def main(train_function):
  client = AdvisorClient()

  # Get or create the study
  study_configuration = {
      "goal": "MINIMIZE",
      "randomInitTrials": 1,
      "maxTrials": 5,
      "maxParallelTrials": 1,
      "params": [
          {
              "parameterName": "gamma",
              "type": "DOUBLE",
              "minValue": 0.001,
              "maxValue": 0.01,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "C",
              "type": "DOUBLE",
              "minValue": 0.5,
              "maxValue": 1.0,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "kernel",
              "type": "CATEGORICAL",
              "minValue": 0,
              "maxValue": 0,
              "feasiblePoints": "linear, poly, rbf, sigmoid, precomputed",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "coef0",
              "type": "DOUBLE",
              "minValue": 0.0,
              "maxValue": 0.5,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
      ]
  }
  study = client.create_study("Study", study_configuration,
                              "BayesianOptimization")
  #study = client.get_study_by_id(6)

  num_trials = 20
  for _ in range(num_trials):
    # Get suggested trials
    trials = client.get_suggestions(study.name, 3)

    # Generate parameters
    parameter_value_dicts = []
    for trial in trials:
      parameter_value_dict = json.loads(trial.parameter_values)
      print("The suggested parameters: {}".format(parameter_value_dict))
      parameter_value_dicts.append(parameter_value_dict)

    # Run training
    metrics = []
    for i in range(len(trials)):
      metric = train_function(**parameter_value_dicts[i])
      #metric = train_function(parameter_value_dicts[i])
      metrics.append(metric)

    # Complete the trials
    for i in range(len(trials)):
      trial = trials[i]
      client.complete_trial_with_one_metric(trial, metrics[i])

  is_done = client.is_study_done(study.name)
  best_trial = client.get_best_trial(study.name)
  print("The study: {}, best trial: {}".format(study, best_trial))
  print(best_trial.parameter_values)
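# A minimal sketch of a train_function matching the SVM search space above
# (gamma, C, kernel, coef0), assuming scikit-learn with the iris dataset as a
# stand-in; the real dataset and scoring are not shown in the original. Note that
# the "precomputed" kernel would additionally require a precomputed kernel
# matrix, which this sketch does not handle.
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC


def train_function(gamma, C, kernel, coef0):
  X, y = datasets.load_iris(return_X_y=True)
  model = SVC(C=C, gamma=gamma, kernel=kernel.strip(), coef0=coef0)
  accuracy = cross_val_score(model, X, y, cv=3).mean()
  # The study goal is MINIMIZE, so return an error rate rather than accuracy.
  return 1.0 - accuracy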
def main():
  client = AdvisorClient()

  # Create the study
  name = "Study"
  study_configuration = {
      "goal": "MINIMIZE",
      "maxTrials": 5,
      "maxParallelTrials": 1,
      "params": [{
          "parameterName": "learning_rate",
          "type": "DOUBLE",
          "minValue": 0.01,
          "maxValue": 0.1,
          "scallingType": "LINEAR"
      }]
  }
  study = client.create_study(name, study_configuration)

  # Get suggested trials
  trials = client.get_suggestions(study.id, 3)

  # Generate command-line parameters
  commandline_parameters = []
  for i, trial in enumerate(trials):
    parameter = "--output_path=output/{}".format(i)
    parameter_value_dict = json.loads(trial.parameter_values)
    # Example: {"learning_rate": 0.05943265431983244}
    for k, v in parameter_value_dict.items():
      parameter += " --{}={}".format(k, v)
    print(parameter)
    commandline_parameters.append(parameter)

  # Run training
  for i in range(3):
    module_args = commandline_parameters[i]
    module_name = "trainer.task"
    # Example: python -m trainer.task --output_path=0 --learning_rate=0.0796523079087
    shell_command = "python -m {} {}".format(module_name, module_args)
    print(shell_command)
    subprocess.call(shell_command, shell=True)

  # Complete the trials
  for i in range(3):
    trial = trials[i]
    logdir = "output/{}".format(i)
    tensorboard_metrics = tensorboard_util.get_hyperparameters_metric(logdir)
    client.complete_trial(trial, tensorboard_metrics)

  # Check if the study is done
  is_done = client.is_study_done(study.id)
  best_trial = client.get_best_trial(study.id)
  print("The study: {}, if it is done: {}, best trial: {}".format(
      study, is_done, best_trial))
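# A minimal sketch of the trainer.task module invoked above, assuming it accepts
# the generated --output_path and --learning_rate flags and writes its final
# metric as a TensorFlow 1.x scalar summary under output_path. The exact summary
# tag that tensorboard_util.get_hyperparameters_metric() reads is not shown in
# the original, so "training/hptuning/metric" and the placeholder loss below are
# assumptions.
import argparse

import tensorflow as tf


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("--output_path", type=str, required=True)
  parser.add_argument("--learning_rate", type=float, default=0.05)
  args = parser.parse_args()

  # Placeholder "training": pretend the loss depends on the learning rate.
  loss = (args.learning_rate - 0.05) ** 2

  # Write the metric as a scalar summary so TensorBoard tooling can read it back.
  writer = tf.summary.FileWriter(args.output_path)
  summary = tf.Summary(
      value=[tf.Summary.Value(tag="training/hptuning/metric", simple_value=loss)])
  writer.add_summary(summary, global_step=0)
  writer.close()


if __name__ == "__main__":
  main()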
def run(self):
  client = AdvisorClient()

  # TODO: move the logic into local runner
  runner = LocalRunner()
  if "runner" in self.run_config_dict:
    if self.run_config_dict["runner"] == "local_runner":
      runner = LocalRunner()
      logging.info("Run with local runner")

  study_name = self.run_config_dict["name"].encode("utf-8")
  study = client.get_or_create_study(study_name,
                                     self.run_config_dict["search_space"],
                                     self.run_config_dict["algorithm"])
  logging.info("Create study: {}".format(study))

  for _ in range(self.run_config_dict["trialNumber"]):
    logging.info("-------------------- Start Trial --------------------")

    # Get suggested trials
    trials = client.get_suggestions(study.name, 1)
    logging.info("Get trial: {}".format(trials[0]))

    # Generate parameters
    parameter_value_dicts = []
    for trial in trials:
      parameter_value_dict = json.loads(trial.parameter_values)
      logging.info(
          "The suggested parameters: {}".format(parameter_value_dict))
      parameter_value_dicts.append(parameter_value_dict)

    # Run training
    for trial in trials:
      #metric = train_function(**parameter_value_dicts[i])
      # Example: {"gamma": 0.0063987614450157415}
      parameters_dict = json.loads(trial.parameter_values)
      parameter_string = ""
      for k, v in parameters_dict.items():
        parameter_string += " -{}={}".format(k, v)

      command_string = "cd {} && {} {}".format(
          self.run_config_dict["path"], self.run_config_dict["command"],
          parameter_string)
      #exit_code = subprocess.call(command_string, shell=True)
      logging.info("Run the command: {}".format(command_string))

      # Example: '0.0\n'
      # Example: 'Compute y = x * x - 3 * x + 2\nIput x is: 1.0\nOutput is: 0.0\n0.0\n'
      command_output = subprocess.check_output(command_string, shell=True)
      # TODO: Log the output in the directory
      #logging.info("Get output of command: {}".format(command_output))
      metric = float(command_output.split("\n")[-2].strip())

      # Complete the trial
      client.complete_trial_with_one_metric(trial, metric)
      logging.info("Update the trial with metrics: {}".format(metric))

    logging.info("--------------------- End Trial ---------------------")

  is_done = client.is_study_done(study.name)
  best_trial = client.get_best_trial(study.name)
  logging.info("The study: {}, best trial: {}".format(study, best_trial))
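# A minimal sketch of the kind of target script this runner can drive, based on
# the example output quoted in the comments above ("Compute y = x * x - 3 * x + 2").
# The runner passes each suggested parameter as a "-name=value" flag and takes
# the last printed line of stdout as the metric, so the script must print the
# metric value last. The script name and the "-x" flag are illustrative.
import argparse


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("-x", type=float, default=0.0)
  args = parser.parse_args()

  print("Compute y = x * x - 3 * x + 2")
  print("Input x is: {}".format(args.x))
  y = args.x * args.x - 3 * args.x + 2
  print("Output is: {}".format(y))
  # The final printed line is what the runner parses back as the metric.
  print(y)


if __name__ == "__main__":
  main()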
def main():
  client = AdvisorClient()
  model = MyModel()
  model.load_dataset()

  # Get or create the study
  study_configuration = {
      "goal": "MINIMIZE",
      "randomInitTrials": 5,
      "maxTrials": 30,
      "maxParallelTrials": 1,
      "params": [
          {
              "parameterName": "max_bin",
              "type": "INTEGER",
              "minValue": 63,
              "maxValue": 511,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "bin_construct_sample_cnt",
              "type": "INTEGER",
              "minValue": 3,
              "maxValue": 1000,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "num_leaves",
              "type": "INTEGER",
              "minValue": 63,
              "maxValue": 512,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          # {
          #     "parameterName": "lambda_l2",
          #     "type": "DOUBLE",
          #     "minValue": 0.0,
          #     "maxValue": 10.0,
          #     "feasiblePoints": "",
          #     "scallingType": "LINEAR"
          # },
          # {
          #     "parameterName": "lambda_l1",
          #     "type": "DOUBLE",
          #     "minValue": 0.0,
          #     "maxValue": 10.0,
          #     "feasiblePoints": "",
          #     "scallingType": "LINEAR"
          # },
          {
              "parameterName": "cat_l2",
              "type": "DOUBLE",
              "minValue": 1.0,
              "maxValue": 100.0,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "cat_smooth",
              "type": "DOUBLE",
              "minValue": 1.0,
              "maxValue": 100.0,
              "feasiblePoints": "",
              "scallingType": "LINEAR"
          },
      ]
  }
  # study = client.create_study("lightgbm_search", study_configuration,
  #                             "BayesianOptimization")
  study = client.get_study_by_id(44)

  # Get suggested trials
  # trials = client.get_suggestions(study.id, 5)
  # for i in range(5):
  #   trial = trials[i]
  #   parameter_value_dict = json.loads(trial.parameter_values)
  #   logger.info("The suggested parameters: {}".format(parameter_value_dict))
  #   metric = model.train(**parameter_value_dict)
  #   client.complete_trial_with_one_metric(trial, metric)

  for _ in range(30):
    # Get suggested trials
    trials = client.get_suggestions(study.id, 1)

    # Generate parameters
    parameter_value_dicts = []
    for trial in trials:
      parameter_value_dict = json.loads(trial.parameter_values)
      logger.info("The suggested parameters: {}".format(parameter_value_dict))
      parameter_value_dicts.append(parameter_value_dict)

    # Run training
    metrics = []
    for i in range(len(trials)):
      metric = model.train(**parameter_value_dicts[i])
      metrics.append(metric)

    # Complete the trials
    for i in range(len(trials)):
      trial = trials[i]
      client.complete_trial_with_one_metric(trial, metrics[i])

  is_done = client.is_study_done(study.id)
  assert is_done
  best_trial = client.get_best_trial(study.id)
  logger.info("The study: {}, best trial: {}".format(study, best_trial))
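# A minimal sketch of the MyModel class used above, assuming LightGBM's sklearn
# wrapper and a synthetic dataset; the real data loading and evaluation are not
# shown in the original. It only illustrates how the suggested parameters
# (max_bin, bin_construct_sample_cnt, num_leaves, cat_l2, cat_smooth) could be
# wired into a cross-validated training run that returns a metric to minimize.
import lightgbm as lgb
import numpy as np
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score


class MyModel(object):

  def load_dataset(self):
    # Placeholder dataset; replace with the real feature/label loading.
    self.X, self.y = make_regression(
        n_samples=2000, n_features=30, random_state=0)

  def train(self, max_bin, bin_construct_sample_cnt, num_leaves, cat_l2,
            cat_smooth):
    model = lgb.LGBMRegressor(
        max_bin=int(max_bin),
        bin_construct_sample_cnt=int(bin_construct_sample_cnt),
        num_leaves=int(num_leaves),
        cat_l2=cat_l2,
        cat_smooth=cat_smooth,
        n_estimators=100)
    scores = cross_val_score(
        model, self.X, self.y, cv=3, scoring="neg_mean_squared_error")
    # Return a positive MSE so that smaller is better, matching goal=MINIMIZE.
    return float(-np.mean(scores))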
def list_trials(args):
  client = AdvisorClient()
  print_trials(client.list_trials(args.study_id))
def list_studies(args):
  client = AdvisorClient()
  print_studies(client.list_studies())
def main(train_function):
  client = AdvisorClient()

  # Get or create the study
  study_configuration = {
      "goal": "MINIMIZE",
      "randomInitTrials": 1,
      "maxTrials": 5,
      "maxParallelTrials": 1,
      "params": [
          {
              "parameterName": "filter_number0",
              "type": "INTEGER",
              "minValue": 16,
              "maxValue": 32,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "convolution_kernel_size0",
              "type": "INTEGER",
              "minValue": 3,
              "maxValue": 5,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "max_polling_size0",
              "type": "INTEGER",
              "minValue": 2,
              "maxValue": 3,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "filter_number1",
              "type": "INTEGER",
              "minValue": 16,
              "maxValue": 32,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "convolution_kernel_size1",
              "type": "INTEGER",
              "minValue": 3,
              "maxValue": 5,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
          {
              "parameterName": "max_polling_size1",
              "type": "INTEGER",
              "minValue": 2,
              "maxValue": 3,
              "feasiblePoints": "",
              "scalingType": "LINEAR"
          },
      ]
  }
  #study = client.create_study("Study", study_configuration, "BayesianOptimization")
  study = client.create_study("Study", study_configuration,
                              "RandomSearchAlgorithm")
  #study = client.get_study_by_id(6)

  # Get suggested trials
  trials = client.get_suggestions(study.id, 3)

  # Generate parameters
  parameter_value_dicts = []
  for trial in trials:
    parameter_value_dict = json.loads(trial.parameter_values)
    print("The suggested parameters: {}".format(parameter_value_dict))
    parameter_value_dicts.append(parameter_value_dict)

  # Run training
  metrics = []
  for i in range(len(trials)):
    metric = train_function(**parameter_value_dicts[i])
    #metric = train_function(parameter_value_dicts[i])
    metrics.append(metric)
    trial = trials[i]

    # Complete the trial
    client.complete_trial_with_one_metric(trial, metrics[i])

  is_done = client.is_study_done(study.id)
  best_trial = client.get_best_trial(study.id)
  print("The study: {}, best trial: {}".format(study, best_trial))
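# A minimal sketch of a train_function matching the two-convolutional-layer
# search space above, assuming tf.keras with MNIST as a stand-in dataset; the
# real model and data are not shown in the original. The parameter names
# (including the "max_polling_size" spelling) are kept exactly as suggested so
# they can be passed through with **parameter_value_dict.
import tensorflow as tf


def train_function(filter_number0, convolution_kernel_size0, max_polling_size0,
                   filter_number1, convolution_kernel_size1, max_polling_size1):
  (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
  x_train = x_train[..., None] / 255.0
  x_test = x_test[..., None] / 255.0

  model = tf.keras.Sequential([
      tf.keras.layers.Conv2D(
          int(filter_number0), int(convolution_kernel_size0),
          activation="relu", input_shape=(28, 28, 1)),
      tf.keras.layers.MaxPooling2D(int(max_polling_size0)),
      tf.keras.layers.Conv2D(
          int(filter_number1), int(convolution_kernel_size1),
          activation="relu"),
      tf.keras.layers.MaxPooling2D(int(max_polling_size1)),
      tf.keras.layers.Flatten(),
      tf.keras.layers.Dense(10, activation="softmax"),
  ])
  model.compile(
      optimizer="adam",
      loss="sparse_categorical_crossentropy",
      metrics=["accuracy"])
  model.fit(x_train, y_train, batch_size=128, epochs=1, verbose=0)

  # Return the test loss so that smaller is better, matching goal=MINIMIZE.
  loss, _ = model.evaluate(x_test, y_test, verbose=0)
  return loss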
def list_trials(args):
  client = AdvisorClient()
  print_trials_as_table(client.list_trials(args.study_name))
def main(train_function):
  client = AdvisorClient()

  # Get or create the study
  study_configuration = {
      "goal": "MINIMIZE",
      "randomInitTrials": 1,
      "maxTrials": 5,
      "maxParallelTrials": 1,
      "params": [
          {
              "parameterName": "operation_name1",
              "type": "CATEGORICAL",
              "minValue": 0,
              "maxValue": 0,
              "feasiblePoints":
              "Fliplr, Crop, GaussianBlur, ContrastNormalization, AdditiveGaussianNoise, Multiply",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "magnitude1",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 11,
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "probability1",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 10,
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "operation_name2",
              "type": "CATEGORICAL",
              "minValue": 0,
              "maxValue": 0,
              "feasiblePoints":
              "Fliplr, Crop, GaussianBlur, ContrastNormalization, AdditiveGaussianNoise, Multiply",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "magnitude2",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 11,
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "probability2",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 10,
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "operation_name3",
              "type": "CATEGORICAL",
              "minValue": 0,
              "maxValue": 0,
              "feasiblePoints":
              "Fliplr, Crop, GaussianBlur, ContrastNormalization, AdditiveGaussianNoise, Multiply",
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "magnitude3",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 11,
              "scallingType": "LINEAR"
          },
          {
              "parameterName": "probability3",
              "type": "INTEGER",
              "minValue": 1,
              "maxValue": 10,
              "scallingType": "LINEAR"
          },
      ]
  }
  #study = client.create_study("Study", study_configuration, "BayesianOptimization")
  study = client.create_study("Study", study_configuration,
                              "RandomSearchAlgorithm")
  #study = client.get_study_by_id(6)

  # Get suggested trials
  trials = client.get_suggestions(study.id, 3)

  # Generate parameters
  parameter_value_dicts = []
  for trial in trials:
    parameter_value_dict = json.loads(trial.parameter_values)
    print("The suggested parameters: {}".format(parameter_value_dict))
    parameter_value_dicts.append(parameter_value_dict)

  # Run training
  metrics = []
  for i in range(len(trials)):
    metric = train_function(**parameter_value_dicts[i])
    #metric = train_function(parameter_value_dicts[i])
    metrics.append(metric)
    trial = trials[i]

    # Complete the trial
    client.complete_trial_with_one_metric(trial, metrics[i])

  is_done = client.is_study_done(study.id)
  best_trial = client.get_best_trial(study.id)
  print("The study: {}, best trial: {}".format(study, best_trial))
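# A minimal sketch of how the suggested (operation_name, magnitude, probability)
# triples could be turned into an augmentation pipeline, assuming the imgaug
# library for the operation names listed in feasiblePoints. The mapping from the
# integer magnitude (1-11) to each augmenter's arguments is an illustrative
# choice, and the real train_function is not shown in the original.
import imgaug.augmenters as iaa


def build_augmenter(operation_name, magnitude, probability):
  scale = magnitude / 11.0
  operations = {
      "Fliplr": iaa.Fliplr(1.0),
      "Crop": iaa.Crop(percent=(0, 0.1 * scale)),
      "GaussianBlur": iaa.GaussianBlur(sigma=(0, 3.0 * scale)),
      "ContrastNormalization": iaa.ContrastNormalization(
          (1.0 - 0.5 * scale, 1.0 + 0.5 * scale)),
      "AdditiveGaussianNoise": iaa.AdditiveGaussianNoise(
          scale=(0, 0.1 * 255 * scale)),
      "Multiply": iaa.Multiply((1.0 - 0.5 * scale, 1.0 + 0.5 * scale)),
  }
  # Apply the chosen operation with the suggested probability (1-10 -> 0.1-1.0).
  return iaa.Sometimes(probability / 10.0, operations[operation_name.strip()])


def build_policy(parameter_value_dict):
  # Combine the three suggested operations into one sequential policy.
  return iaa.Sequential([
      build_augmenter(parameter_value_dict["operation_name{}".format(i)],
                      parameter_value_dict["magnitude{}".format(i)],
                      parameter_value_dict["probability{}".format(i)])
      for i in range(1, 4)
  ])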