def test_function():
    # Smoke test for the Learner wrapper: a valid strategy, an unknown
    # strategy ("ssvr"), an invalid hyper-parameter value, and a predict
    # call on a model that was never trained.
    model = learning.Learner("svr", "")
    model.learn([[0, 0]], [1])
    pred = model.predict([[0, 0]])
    print_line_array(pred)

    model = learning.Learner("ssvr", "")
    model.learn([[0, 0]], [1])
    pred = model.predict([[0, 0]])
    print_line_array(pred)

    model = learning.Learner("svr", ["C='bogus'"])
    model.learn([[0, 0]], [1])
    print_line_array(model.predict([[0, 0]]))

    model = learning.Learner("svr", [])
    print_line_array(model.predict([[0, 0]]))
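
# The Learner wrapper itself lives in the learning module and is not shown
# here.  The sketch below (hypothetical, hence the _sketch_ name) only
# illustrates the settings format the test exercises: each entry is an
# "identifier=value" string, as in "C='bogus'" above, which the wrapper is
# expected to parse into estimator parameters and to tolerate when the value
# does not parse.
def _sketch_parse_learner_settings(learner_settings):
    import ast
    params = {}
    for setting in learner_settings:
        key, _, raw_value = setting.partition("=")
        try:
            # literal_eval turns "0.1" into the float 0.1 and "'bogus'" into the string 'bogus'
            params[key.strip()] = ast.literal_eval(raw_value.strip())
        except (ValueError, SyntaxError):
            # fall back to the raw string if the value does not parse
            params[key.strip()] = raw_value.strip()
    return params
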
def main():
    configurations_learn = Configurations()
    configurations_predict = Configurations()
    learning_strategy = ""
    learner_settings = []

    # Sequence for getting the basic learning settings from C#
    print_line(REQUESTING_LEARNING_SETTINGS)
    csharp_response = raw_input()
    if csharp_response == SETTING_STREAM_START:
        learning_strategy = raw_input()
        learner_setting = raw_input()
        while learner_setting != SETTING_STREAM_END:
            # pair of settings passed by other application in format identifier=value
            learner_settings.append(learner_setting)
            learner_setting = raw_input()

    print_line(REQUESTING_CONFIGURATION)
    task = raw_input()

    # perform prediction
    if task == START_LEARN:
        global number_of_configurations
        number_of_configurations = len(configurations_learn.results)
        model = learning.Learner(learning_strategy, learner_settings)
        task = raw_input()
        if task == CONFIG_PARTIAL_STREAM_START:
            conf_partial(model)
        configurations_predict = get_configurations_predict(configurations_predict)
        predictions = model.predict(configurations_predict.features)
        print_line(FINISHED_LEARNING)
        if raw_input() == REQUESTING_LEARNING_RESULTS:
            print_line_array(predictions)

    # perform parameter tuning
    elif task == START_PARAM_TUNING:
        configurations_learn = get_configurations_learn(configurations_learn)
        configurations_predict = get_configurations_predict(configurations_predict)
        print_line(FINISHED_LEARNING)
        target_path = raw_input()
        parameterTuning.setOutputPath(target_path)
        optimal_parameters = parameterTuning.optimizeParameter(
            learning_strategy, configurations_learn.features,
            configurations_learn.results, learner_settings)
        if raw_input() == REQUESTING_LEARNING_RESULTS:
            print_line(optimal_parameters)
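
# The Configurations container is defined elsewhere.  main() above only relies
# on two parallel attributes (feature vectors for predict, results for their
# count), so a minimal hypothetical stand-in (note the _Sketch name) could look
# like this; the real class may carry additional state such as option names or
# parsing logic.
class _SketchConfigurations(object):
    def __init__(self):
        self.features = []  # one list of option values per configuration
        self.results = []   # one measured value (e.g. performance) per configuration

    def append(self, feature_vector, result):
        self.features.append(feature_vector)
        self.results.append(result)
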
def main():
    if argv[1].lower() == "true":
        global debug
        debug = True
        print("Debug output mode enabled. \n Processing input\n", file=sys.stderr, flush=True)

    configurations_learn = Configurations()
    configurations_predict = Configurations()
    learning_strategy = ""
    learner_settings = []

    # Sequence for getting the basic learning settings from C#
    print_line(REQUESTING_LEARNING_SETTINGS)
    csharp_response = input()
    if csharp_response == SETTING_STREAM_START:
        learning_strategy = input()
        learner_setting = input()
        if debug:
            print("Received learning strategy: " + learning_strategy + "\n", file=sys.stderr, flush=True)
            print("Received learner setting: " + learner_setting + "\n", file=sys.stderr, flush=True)
        while learner_setting != SETTING_STREAM_END:
            # pair of settings passed by other application in format identifier=value
            learner_settings.append(learner_setting)
            learner_setting = input()
            if debug:
                print("Received learner setting: " + learner_setting + "\n", file=sys.stderr, flush=True)

    get_configurations(configurations_learn, configurations_predict)
    if debug:
        print("Found learning set. " + str(configurations_learn) + "\n", file=sys.stderr, flush=True)
        print("Found validation set. " + str(configurations_predict) + "\n", file=sys.stderr, flush=True)

    task = input()

    # perform prediction
    if task == START_LEARN:
        if debug:
            print("Starting the learning.\n", file=sys.stderr, flush=True)
        learning.number_of_configurations = len(configurations_learn.results)
        model = learning.Learner(learning_strategy, learner_settings)
        start = perf_counter()
        model.learn(configurations_learn.features, configurations_learn.results)
        elapsed = perf_counter() - start
        predictions = model.predict(configurations_predict.features)
        print_line(FINISHED_LEARNING)
        if debug:
            print("Finished the learning.\n", file=sys.stderr, flush=True)
        if input() == REQUESTING_LEARNING_RESULTS:
            if debug:
                print("Extracting trees.\n", file=sys.stderr, flush=True)
            print_line_array(predictions)
            # write the learned tree(s) to tree_path, if a path was configured
            # and the learning strategy produces extractable trees
            if tree_path.strip() != "" and check_prereq(model.learning_model):
                print_line(str(elapsed))
                tree_file = open(tree_path, 'w')
                tree = learnerExtraction.extract(model.learning_model)
                if len(tree) == 1:
                    tree_file.write(str(tree) + "\n")
                else:
                    forest = tree
                    for tree in forest:
                        tree_file.write(str(tree) + "\n")
                tree_file.flush()
                tree_file.close()

    # perform parameter tuning
    elif task == START_PARAM_TUNING:
        if debug:
            print("Starting the learning.\n", file=sys.stderr, flush=True)
        parameterTuning.setOutputPath(input())
        optimal_parameters = parameterTuning.optimizeParameter(
            learning_strategy, configurations_learn.features,
            configurations_learn.results, learner_settings)
        print_line(FINISHED_LEARNING)
        if input() == REQUESTING_LEARNING_RESULTS:
            print_line(optimal_parameters)
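
# check_prereq() is defined elsewhere in this script.  Given how it guards the
# learnerExtraction.extract() call above, it presumably verifies that the
# fitted estimator is a model whose trees can actually be extracted.  A
# hypothetical version of such a guard (illustrative _sketch_ name, assuming
# scikit-learn estimators) could look like this:
def _sketch_check_prereq(learning_model):
    from sklearn.tree import DecisionTreeRegressor
    from sklearn.ensemble import RandomForestRegressor
    return isinstance(learning_model, (DecisionTreeRegressor, RandomForestRegressor))
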
def main(args):
    topology_name = args['topology']
    previous_model = args['nn_model']
    max_runs = args['max_runs']

    # Load the topology (from argument) and create the Storm runner
    topology = experiments.Topology.load(topology_name)
    runner = stormmetrics.TopologyRunner(topology)

    # Length of each action space
    # e.g.: split_bolt_num (10 possible values), backpressure.enable (2 possible values)...
    actions = sorted(topology.configurable_params.keys())
    actions_values = [topology.configurable_params[a] for a in actions]
    actions_space = [len(v) for v in actions_values]

    # Length of each metric
    # e.g.: throughput (1 value), tail latency (5 values), CPU utilization (12 values)...
    metrics = sorted(runner.metrics.keys())
    metrics_length = [runner.metrics[m] for m in metrics]
    state_length = [len(actions)] + metrics_length

    with tf.Session() as sess, open(LOG_FILE, 'w') as log_file:
        # The learner instance communicates with the ML framework.
        learner = learning.Learner(sess, actions_space, state_length, previous_model)
        run = None
        epoch = 0

        # Training loop
        while max_runs is None or epoch < max_runs:
            logging.info('=== Epoch: %s' % epoch)

            #########
            # State #
            #########
            if run is not None:
                # Extract run metrics information (in sorted order)
                state = []
                state.append([run.config[action] for action in actions])
                for metric in metrics:
                    state.append(stormmetrics.get_metrics(run.results)[metric])
            else:
                # Fake initial metric information (don't train with those)
                state = [l * [0] for l in [len(actions)] + metrics_length]

            ##########
            # Action #
            ##########
            actions_choices = learner.predict(state)

            # Map predicted action to actual configuration parameters
            config = {}
            for action, values, choice in zip(actions, actions_values, actions_choices):
                config[action] = values[choice]

            ###########
            # Metrics #
            ###########
            run = runner.run(config)

            ##########
            # Reward #
            ##########
            reward = reward_function(stormmetrics.get_metrics(run.results))

            log_file.write('\t'.join([
                str(epoch), str(actions), str(reward), str(metrics),
                str(learner.entropy_record[-1])
            ]) + '\n')
            log_file.flush()

            if epoch > 0:
                learner.train(actions_choices, state, reward, epoch)

            epoch += 1
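
# reward_function() is not shown in this file.  The sketch below is a
# hypothetical example (note the _sketch_ name; the metric keys 'throughput'
# and 'latency' are assumptions, not the real keys) of how a scalar reward
# could be derived from the metrics dictionary used above: reward throughput
# and penalise tail latency so the learner has to trade the two off.
def _sketch_reward_function(metrics_dict, latency_weight=0.1):
    throughput = sum(metrics_dict.get('throughput', [0.0]))
    tail_latency = max(metrics_dict.get('latency', [0.0]))
    return throughput - latency_weight * tail_latency
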