@classmethod
def setUpClass(cls):
    super(TestGPRGP, cls).setUpClass()
    boston = datasets.load_boston()
    data = boston['data']
    X_train = data[0:500]
    X_test = data[500:501]
    y_train = boston['target'][0:500].reshape(500, 1)
    X_min = np.min(X_train, 0)
    X_max = np.max(X_train, 0)
    random.seed(0)
    np.random.seed(0)
    tf.set_random_seed(0)
    model_kwargs = {}
    opt_kwargs = {}
    opt_kwargs['learning_rate'] = 0.01
    opt_kwargs['maxiter'] = 10
    opt_kwargs['bounds'] = [X_min, X_max]
    opt_kwargs['ucb_beta'] = 1.0
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    gpflow.reset_default_session(graph=graph)
    cls.m = gpr_models.create_model('BasicGP', X=X_train, y=y_train, **model_kwargs)
    cls.gpr_result = tf_optimize(cls.m.model, X_test, **opt_kwargs)
@classmethod
def setUpClass(cls):
    super(TestGPRGPFlow, cls).setUpClass()
    boston = datasets.load_boston()
    data = boston['data']
    X_train = data[0:500]
    X_test = data[500:]
    y_train = boston['target'][0:500].reshape(500, 1)
    model_kwargs = {'lengthscales': 1, 'variance': 1, 'noise_variance': 1}
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    gpflow.reset_default_session(graph=graph)
    cls.m = gpr_models.create_model('BasicGP', X=X_train, y=y_train, **model_kwargs)
    cls.gpr_result = gpflow_predict(cls.m.model, X_test)
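# A sanity-check test over the fixture above could assert on the shape of
# the prediction. This is a sketch, not part of the original suite; it
# assumes the gpflow_predict result exposes `ypreds` with one row per test
# input (the same field map_workload reads below), and relies on the Boston
# housing dataset having 506 rows, so data[500:] holds 6 points.
def test_gpflow_predict_shape(self):
    self.assertEqual(self.gpr_result.ypreds.shape[0], 6)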
def run_optimize(X, y, X_sample, model_name, opt_kwargs, model_kwargs):
    timer = TimerStruct()

    # Create model (this also optimizes the hyperparameters if that option is enabled)
    timer.start()
    m = gpr_models.create_model(model_name, X=X, y=y, **model_kwargs)
    timer.stop()
    model_creation_sec = timer.elapsed_seconds
    LOG.info(m._model.as_pandas_table())

    # Optimize the DBMS's configuration knobs
    timer.start()
    X_new, ypred, yvar, loss = tf_optimize(m._model, X_sample, **opt_kwargs)
    timer.stop()
    config_optimize_sec = timer.elapsed_seconds

    return X_new, ypred, m.get_model_parameters(), m.get_hyperparameters()
def run_optimize(X, y, X_samples, model_name, opt_kwargs, model_kwargs):
    timer = TimerStruct()

    # Create model (this also optimizes the hyperparameters if that option is enabled)
    timer.start()
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    gpflow.reset_default_session(graph=graph)
    m = gpr_models.create_model(model_name, X=X, y=y, **model_kwargs)
    timer.stop()
    model_creation_sec = timer.elapsed_seconds
    LOG.info(m.model.as_pandas_table())

    # Optimize the DBMS's configuration knobs
    timer.start()
    res = tf_optimize(m.model, X_samples, **opt_kwargs)
    timer.stop()
    config_optimize_sec = timer.elapsed_seconds

    return res.minl_conf, res.minl, m.get_model_parameters(), m.get_hyperparameters()
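# A minimal driver for the GPFlow version of run_optimize above, shown only
# for illustration. It reuses the Boston fixture from the tests; the keyword
# values mirror those built in setUpClass and are not tuned.
boston = datasets.load_boston()
X = boston['data'][0:500]
y = boston['target'][0:500].reshape(500, 1)
X_samples = boston['data'][500:]
opt_kwargs = {'learning_rate': 0.01, 'maxiter': 10,
              'bounds': [np.min(X, 0), np.max(X, 0)], 'ucb_beta': 1.0}
minl_conf, minl, model_params, hyperparams = run_optimize(
    X, y, X_samples, 'BasicGP', opt_kwargs, model_kwargs={})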
def configuration_recommendation(recommendation_input):
    target_data, algorithm = recommendation_input
    LOG.info('configuration_recommendation called')

    if target_data['bad'] is True:
        target_data_res = dict(
            status='bad',
            result_id=target_data['newest_result_id'],
            info='WARNING: no training data, the config is generated randomly',
            recommendation=target_data['config_recommend'],
            pipeline_run=target_data['pipeline_run'])
        LOG.debug('%s: Skipping configuration recommendation.\n\ndata=%s\n',
                  AlgorithmType.name(algorithm),
                  JSONUtil.dumps(target_data, pprint=True))
        return target_data_res

    # Load mapped workload data
    mapped_workload_id = target_data['mapped_workload'][0]

    latest_pipeline_run = PipelineRun.objects.get(pk=target_data['pipeline_run'])
    mapped_workload = Workload.objects.get(pk=mapped_workload_id)
    workload_knob_data = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.KNOB_DATA)
    workload_knob_data = JSONUtil.loads(workload_knob_data.data)
    workload_metric_data = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.METRIC_DATA)
    workload_metric_data = JSONUtil.loads(workload_metric_data.data)

    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    cleaned_workload_knob_data = clean_knob_data(workload_knob_data["data"],
                                                 workload_knob_data["columnlabels"],
                                                 newest_result.session)

    X_workload = np.array(cleaned_workload_knob_data[0])
    X_columnlabels = np.array(cleaned_workload_knob_data[1])
    y_workload = np.array(workload_metric_data['data'])
    y_columnlabels = np.array(workload_metric_data['columnlabels'])
    rowlabels_workload = np.array(workload_metric_data['rowlabels'])

    # Target workload data
    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    X_target = target_data['X_matrix']
    y_target = target_data['y_matrix']
    rowlabels_target = np.array(target_data['rowlabels'])

    if not np.array_equal(X_columnlabels, target_data['X_columnlabels']):
        raise Exception(('The workload and target data should have '
                         'identical X columnlabels (sorted knob names)'))
    if not np.array_equal(y_columnlabels, target_data['y_columnlabels']):
        raise Exception(('The workload and target data should have '
                         'identical y columnlabels (sorted metric names)'))

    # Filter Xs by top 10 ranked knobs
    ranked_knobs = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.RANKED_KNOBS)
    ranked_knobs = JSONUtil.loads(ranked_knobs.data)[:IMPORTANT_KNOB_NUMBER]
    ranked_knob_idxs = [i for i, cl in enumerate(X_columnlabels) if cl in ranked_knobs]
    X_workload = X_workload[:, ranked_knob_idxs]
    X_target = X_target[:, ranked_knob_idxs]
    X_columnlabels = X_columnlabels[ranked_knob_idxs]

    # Filter ys by current target objective metric
    target_objective = newest_result.session.target_objective
    target_obj_idx = [i for i, cl in enumerate(y_columnlabels) if cl == target_objective]
    if len(target_obj_idx) == 0:
        raise Exception(('Could not find target objective in metrics '
                         '(target_obj={})').format(target_objective))
    elif len(target_obj_idx) > 1:
        raise Exception(('Found {} instances of target objective in '
                         'metrics (target_obj={})').format(len(target_obj_idx),
                                                           target_objective))

    metric_meta = db.target_objectives.get_metric_metadata(
        newest_result.session.dbms.pk, newest_result.session.target_objective)
    lessisbetter = metric_meta[target_objective].improvement == \
        db.target_objectives.LESS_IS_BETTER

    y_workload = y_workload[:, target_obj_idx]
    y_target = y_target[:, target_obj_idx]
    y_columnlabels = y_columnlabels[target_obj_idx]

    # Combine duplicate rows in the target/workload data (separately)
    X_workload, y_workload, rowlabels_workload = DataUtil.combine_duplicate_rows(
        X_workload, y_workload, rowlabels_workload)
    X_target, y_target, rowlabels_target = DataUtil.combine_duplicate_rows(
        X_target, y_target, rowlabels_target)

    # Delete any rows that appear in both the workload data and the target
    # data from the workload data
    dups_filter = np.ones(X_workload.shape[0], dtype=bool)
    target_row_tups = [tuple(row) for row in X_target]
    for i, row in enumerate(X_workload):
        if tuple(row) in target_row_tups:
            dups_filter[i] = False
    X_workload = X_workload[dups_filter, :]
    y_workload = y_workload[dups_filter, :]
    rowlabels_workload = rowlabels_workload[dups_filter]

    # Combine target & workload Xs for preprocessing
    X_matrix = np.vstack([X_target, X_workload])

    # Dummy encode categorical variables
    categorical_info = DataUtil.dummy_encoder_helper(X_columnlabels,
                                                     mapped_workload.dbms)
    dummy_encoder = DummyEncoder(categorical_info['n_values'],
                                 categorical_info['categorical_features'],
                                 categorical_info['cat_columnlabels'],
                                 categorical_info['noncat_columnlabels'])
    X_matrix = dummy_encoder.fit_transform(X_matrix)

    # The below two variables are needed for correctly determining max/min on dummies
    binary_index_set = set(categorical_info['binary_vars'])
    total_dummies = dummy_encoder.total_dummies()

    # Scale to N(0, 1)
    X_scaler = StandardScaler()
    X_scaled = X_scaler.fit_transform(X_matrix)
    if y_target.shape[0] < 5:  # FIXME
        # FIXME (dva): if there are fewer than 5 target results so far
        # then scale the y values (metrics) using the workload's
        # y_scaler. I'm not sure if 5 is the right cutoff.
        y_target_scaler = None
        y_workload_scaler = StandardScaler()
        y_matrix = np.vstack([y_target, y_workload])
        y_scaled = y_workload_scaler.fit_transform(y_matrix)
    else:
        # FIXME (dva): otherwise try to compute a separate y_scaler for
        # the target and scale them separately.
        try:
            y_target_scaler = StandardScaler()
            y_workload_scaler = StandardScaler()
            y_target_scaled = y_target_scaler.fit_transform(y_target)
            y_workload_scaled = y_workload_scaler.fit_transform(y_workload)
            y_scaled = np.vstack([y_target_scaled, y_workload_scaled])
        except ValueError:
            y_target_scaler = None
            y_workload_scaler = StandardScaler()
            y_scaled = y_workload_scaler.fit_transform(y_target)

    # Set up constraint helper
    constraint_helper = ParamConstraintHelper(
        scaler=X_scaler,
        encoder=dummy_encoder,
        binary_vars=categorical_info['binary_vars'],
        init_flip_prob=INIT_FLIP_PROB,
        flip_prob_decay=FLIP_PROB_DECAY)

    # FIXME (dva): check if these are good values for the ridge
    # ridge = np.empty(X_scaled.shape[0])
    # ridge[:X_target.shape[0]] = 0.01
    # ridge[X_target.shape[0]:] = 0.1

    # FIXME: we should generate more samples and use a smarter sampling
    # technique
    num_samples = NUM_SAMPLES
    X_samples = np.empty((num_samples, X_scaled.shape[1]))
    X_min = np.empty(X_scaled.shape[1])
    X_max = np.empty(X_scaled.shape[1])
    X_scaler_matrix = np.zeros([1, X_scaled.shape[1]])

    session_knobs = SessionKnob.objects.get_knobs_for_session(newest_result.session)

    # Set min/max for knob values
    for i in range(X_scaled.shape[1]):
        if i < total_dummies or i in binary_index_set:
            col_min = 0
            col_max = 1
        else:
            col_min = X_scaled[:, i].min()
            col_max = X_scaled[:, i].max()
            for knob in session_knobs:
                if X_columnlabels[i] == knob["name"]:
                    X_scaler_matrix[0][i] = knob["minval"]
                    col_min = X_scaler.transform(X_scaler_matrix)[0][i]
                    X_scaler_matrix[0][i] = knob["maxval"]
                    col_max = X_scaler.transform(X_scaler_matrix)[0][i]
        X_min[i] = col_min
        X_max[i] = col_max
        X_samples[:, i] = np.random.rand(num_samples) * (col_max - col_min) + col_min

    # Maximize the throughput (moreisbetter):
    # use gradient descent to minimize -throughput
    if not lessisbetter:
        y_scaled = -y_scaled

    q = queue.PriorityQueue()
    for x in range(0, y_scaled.shape[0]):
        q.put((y_scaled[x][0], x))

    i = 0
    while i < TOP_NUM_CONFIG:
        try:
            item = q.get_nowait()
            # TensorFlow gets broken if we use the training data points as
            # starting points for GPRGD, so we add a small bias to the
            # starting points (GPR_EPS default value is 0.001). If the
            # starting point is X_max, we subtract a small bias instead to
            # make sure it stays within the range.
            dist = sum(np.square(X_max - X_scaled[item[1]]))
            if dist < 0.001:
                X_samples = np.vstack((X_samples, X_scaled[item[1]] - abs(GPR_EPS)))
            else:
                X_samples = np.vstack((X_samples, X_scaled[item[1]] + abs(GPR_EPS)))
            i = i + 1
        except queue.Empty:
            break

    session = newest_result.session
    res = None

    if algorithm == AlgorithmType.DNN:
        # Neural network model
        model_nn = NeuralNet(n_input=X_samples.shape[1],
                             batch_size=X_samples.shape[0],
                             explore_iters=DNN_EXPLORE_ITER,
                             noise_scale_begin=DNN_NOISE_SCALE_BEGIN,
                             noise_scale_end=DNN_NOISE_SCALE_END,
                             debug=DNN_DEBUG,
                             debug_interval=DNN_DEBUG_INTERVAL)
        if session.dnn_model is not None:
            model_nn.set_weights_bin(session.dnn_model)
        model_nn.fit(X_scaled, y_scaled, fit_epochs=DNN_TRAIN_ITER)
        res = model_nn.recommend(X_samples, X_min, X_max,
                                 explore=DNN_EXPLORE, recommend_epochs=MAX_ITER)
        session.dnn_model = model_nn.get_weights_bin()
        session.save()

    elif algorithm == AlgorithmType.GPR:
        # Default GPR model
        if USE_GPFLOW:
            model_kwargs = {}
            model_kwargs['model_learning_rate'] = HP_LEARNING_RATE
            model_kwargs['model_maxiter'] = HP_MAX_ITER
            opt_kwargs = {}
            opt_kwargs['learning_rate'] = DEFAULT_LEARNING_RATE
            opt_kwargs['maxiter'] = MAX_ITER
            opt_kwargs['bounds'] = [X_min, X_max]
            ucb_beta = 'get_beta_td'
            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(ucb_beta,
                                                      scale=DEFAULT_UCB_SCALE,
                                                      t=i + 1.,
                                                      ndim=X_scaled.shape[1])
            tf.reset_default_graph()
            graph = tf.get_default_graph()
            gpflow.reset_default_session(graph=graph)
            m = gpr_models.create_model('BasicGP', X=X_scaled, y=y_scaled,
                                        **model_kwargs)
            res = tf_optimize(m.model, X_samples, **opt_kwargs)
        else:
            model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,
                          magnitude=DEFAULT_MAGNITUDE,
                          max_train_size=MAX_TRAIN_SIZE,
                          batch_size=BATCH_SIZE,
                          num_threads=NUM_THREADS,
                          learning_rate=DEFAULT_LEARNING_RATE,
                          epsilon=DEFAULT_EPSILON,
                          max_iter=MAX_ITER,
                          sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
                          mu_multiplier=DEFAULT_MU_MULTIPLIER)
            model.fit(X_scaled, y_scaled, X_min, X_max, ridge=DEFAULT_RIDGE)
            res = model.predict(X_samples, constraint_helper=constraint_helper)

    best_config_idx = np.argmin(res.minl.ravel())
    best_config = res.minl_conf[best_config_idx, :]
    best_config = X_scaler.inverse_transform(best_config)

    # Decode one-hot encoding into categorical knobs
    best_config = dummy_encoder.inverse_transform(best_config)

    # Although we have max/min limits in the GPRGD training session, it may
    # lose some precision, e.g. 0.99...99 >= 1.0 may be true on the scaled
    # data. When we inverse-transform the scaled data, the difference becomes
    # much larger and cannot be ignored. Here we check the range on the
    # original data directly, and make sure the recommended config lies
    # within the range.
    X_min_inv = X_scaler.inverse_transform(X_min)
    X_max_inv = X_scaler.inverse_transform(X_max)
    best_config = np.minimum(best_config, X_max_inv)
    best_config = np.maximum(best_config, X_min_inv)

    conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
    conf_map_res = dict(status='good',
                        result_id=target_data['newest_result_id'],
                        recommendation=conf_map,
                        info='INFO: training data size is {}'.format(
                            X_scaled.shape[0]),
                        pipeline_run=latest_pipeline_run.pk)
    LOG.debug('%s: Finished selecting the next config.\n\ndata=%s\n',
              AlgorithmType.name(algorithm),
              JSONUtil.dumps(conf_map_res, pprint=True))
    return conf_map_res
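# The starting-point perturbation in the loop above is easy to exercise in
# isolation. A standalone sketch (the helper name is ours, not from the
# source): top-ranked training points are nudged by +/-GPR_EPS so gradient
# descent never starts exactly on a training point, and points sitting at
# X_max are nudged downward to stay inside the bounds.
def perturb_starting_points(X_scaled, X_max, top_idxs, gpr_eps=0.001):
    starts = []
    for idx in top_idxs:
        dist = np.sum(np.square(X_max - X_scaled[idx]))
        offset = -abs(gpr_eps) if dist < 0.001 else abs(gpr_eps)
        starts.append(X_scaled[idx] + offset)
    return np.vstack(starts)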
def configuration_recommendation(recommendation_input):
    target_data, algorithm = recommendation_input
    LOG.info('configuration_recommendation called')
    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    session = newest_result.session
    params = JSONUtil.loads(session.hyperparameters)

    if target_data['bad'] is True:
        target_data_res = create_and_save_recommendation(
            recommended_knobs=target_data['config_recommend'],
            result=newest_result,
            status='bad',
            info='WARNING: no training data, the config is generated randomly',
            pipeline_run=target_data['pipeline_run'])
        LOG.debug('%s: Skipping configuration recommendation.\n\ndata=%s\n',
                  AlgorithmType.name(algorithm),
                  JSONUtil.dumps(target_data, pprint=True))
        return target_data_res

    X_columnlabels, X_scaler, X_scaled, y_scaled, X_max, X_min,\
        dummy_encoder, constraint_helper = combine_workload(target_data)

    # FIXME: we should generate more samples and use a smarter sampling
    # technique
    num_samples = params['NUM_SAMPLES']
    X_samples = np.empty((num_samples, X_scaled.shape[1]))
    for i in range(X_scaled.shape[1]):
        X_samples[:, i] = np.random.rand(num_samples) * (X_max[i] - X_min[i]) + X_min[i]

    q = queue.PriorityQueue()
    for x in range(0, y_scaled.shape[0]):
        q.put((y_scaled[x][0], x))

    i = 0
    while i < params['TOP_NUM_CONFIG']:
        try:
            item = q.get_nowait()
            # TensorFlow gets broken if we use the training data points as
            # starting points for GPRGD, so we add a small bias to the
            # starting points (GPR_EPS default value is 0.001). If the
            # starting point is X_max, we subtract a small bias instead to
            # make sure it stays within the range.
            dist = sum(np.square(X_max - X_scaled[item[1]]))
            if dist < 0.001:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] - abs(params['GPR_EPS'])))
            else:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] + abs(params['GPR_EPS'])))
            i = i + 1
        except queue.Empty:
            break

    res = None
    if algorithm == AlgorithmType.DNN:
        # Neural network model
        model_nn = NeuralNet(n_input=X_samples.shape[1],
                             batch_size=X_samples.shape[0],
                             explore_iters=params['DNN_EXPLORE_ITER'],
                             noise_scale_begin=params['DNN_NOISE_SCALE_BEGIN'],
                             noise_scale_end=params['DNN_NOISE_SCALE_END'],
                             debug=params['DNN_DEBUG'],
                             debug_interval=params['DNN_DEBUG_INTERVAL'])
        if session.dnn_model is not None:
            model_nn.set_weights_bin(session.dnn_model)
        model_nn.fit(X_scaled, y_scaled, fit_epochs=params['DNN_TRAIN_ITER'])
        res = model_nn.recommend(X_samples, X_min, X_max,
                                 explore=params['DNN_EXPLORE'],
                                 recommend_epochs=params['DNN_GD_ITER'])
        session.dnn_model = model_nn.get_weights_bin()
        session.save()

    elif algorithm == AlgorithmType.GPR:
        # Default GPR model
        if params['GPR_USE_GPFLOW']:
            model_kwargs = {}
            model_kwargs['model_learning_rate'] = params['GPR_HP_LEARNING_RATE']
            model_kwargs['model_maxiter'] = params['GPR_HP_MAX_ITER']
            opt_kwargs = {}
            opt_kwargs['learning_rate'] = params['GPR_LEARNING_RATE']
            opt_kwargs['maxiter'] = params['GPR_MAX_ITER']
            opt_kwargs['bounds'] = [X_min, X_max]
            opt_kwargs['debug'] = params['GPR_DEBUG']
            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(params['GPR_UCB_BETA'],
                                                      scale=params['GPR_UCB_SCALE'],
                                                      t=i + 1.,
                                                      ndim=X_scaled.shape[1])
            tf.reset_default_graph()
            graph = tf.get_default_graph()
            gpflow.reset_default_session(graph=graph)
            m = gpr_models.create_model(params['GPR_MODEL_NAME'], X=X_scaled,
                                        y=y_scaled, **model_kwargs)
            res = tf_optimize(m.model, X_samples, **opt_kwargs)
        else:
            model = GPRGD(length_scale=params['GPR_LENGTH_SCALE'],
                          magnitude=params['GPR_MAGNITUDE'],
                          max_train_size=params['GPR_MAX_TRAIN_SIZE'],
                          batch_size=params['GPR_BATCH_SIZE'],
                          num_threads=params['TF_NUM_THREADS'],
                          learning_rate=params['GPR_LEARNING_RATE'],
                          epsilon=params['GPR_EPSILON'],
                          max_iter=params['GPR_MAX_ITER'],
                          sigma_multiplier=params['GPR_SIGMA_MULTIPLIER'],
                          mu_multiplier=params['GPR_MU_MULTIPLIER'],
                          ridge=params['GPR_RIDGE'])
            model.fit(X_scaled, y_scaled, X_min, X_max)
            res = model.predict(X_samples, constraint_helper=constraint_helper)

    best_config_idx = np.argmin(res.minl.ravel())
    best_config = res.minl_conf[best_config_idx, :]
    best_config = X_scaler.inverse_transform(best_config)

    if ENABLE_DUMMY_ENCODER:
        # Decode one-hot encoding into categorical knobs
        best_config = dummy_encoder.inverse_transform(best_config)

    # Although we have max/min limits in the GPRGD training session, it may
    # lose some precision, e.g. 0.99...99 >= 1.0 may be true on the scaled
    # data. When we inverse-transform the scaled data, the difference becomes
    # much larger and cannot be ignored. Here we check the range on the
    # original data directly, and make sure the recommended config lies
    # within the range.
    X_min_inv = X_scaler.inverse_transform(X_min)
    X_max_inv = X_scaler.inverse_transform(X_max)
    best_config = np.minimum(best_config, X_max_inv)
    best_config = np.maximum(best_config, X_min_inv)

    conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
    conf_map_res = create_and_save_recommendation(
        recommended_knobs=conf_map,
        result=newest_result,
        status='good',
        info='INFO: training data size is {}'.format(X_scaled.shape[0]),
        pipeline_run=target_data['pipeline_run'])
    LOG.debug('%s: Finished selecting the next config.\n\ndata=%s\n',
              AlgorithmType.name(algorithm),
              JSONUtil.dumps(conf_map_res, pprint=True))
    return conf_map_res
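# Tiny numeric illustration (values are made up) of why the final clamping
# step above matters: a scaled coordinate like 1.0000001 can inverse-transform
# to a value visibly above the knob's true maximum, so the recommendation is
# clipped in raw units before it is saved.
X_min_inv = np.array([0.0, 8.0])
X_max_inv = np.array([64.0, 1024.0])
best_config = np.array([70.3, 5.9])               # slightly out of range
best_config = np.minimum(best_config, X_max_inv)  # -> array([64. , 5.9])
best_config = np.maximum(best_config, X_min_inv)  # -> array([64., 8.])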
def map_workload(map_workload_input):
    start_ts = time.time()
    target_data, algorithm = map_workload_input

    if target_data['bad']:
        assert target_data is not None
        target_data['pipeline_run'] = None
        LOG.debug('%s: Skipping workload mapping.\n\ndata=%s\n',
                  AlgorithmType.name(algorithm),
                  JSONUtil.dumps(target_data, pprint=True))
        return target_data, algorithm

    # Get the latest version of pipeline data that's been computed so far.
    latest_pipeline_run = PipelineRun.objects.get_latest()
    assert latest_pipeline_run is not None
    target_data['pipeline_run'] = latest_pipeline_run.pk

    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    session = newest_result.session
    params = JSONUtil.loads(session.hyperparameters)
    target_workload = newest_result.workload
    X_columnlabels = np.array(target_data['X_columnlabels'])
    y_columnlabels = np.array(target_data['y_columnlabels'])

    # Find all pipeline data belonging to the latest version with the same
    # DBMS and hardware as the target
    pipeline_data = PipelineData.objects.filter(
        pipeline_run=latest_pipeline_run,
        workload__dbms=target_workload.dbms,
        workload__hardware=target_workload.hardware,
        workload__project=target_workload.project)

    # FIXME (dva): we should also compute the global (i.e., overall)
    # ranked_knobs and pruned metrics but we just use those from the first
    # workload for now
    initialized = False
    global_ranked_knobs = None
    global_pruned_metrics = None
    ranked_knob_idxs = None
    pruned_metric_idxs = None

    unique_workloads = pipeline_data.values_list('workload', flat=True).distinct()

    workload_data = {}
    # Compute workload mapping data for each unique workload
    for unique_workload in unique_workloads:
        workload_obj = Workload.objects.get(pk=unique_workload)
        wkld_results = Result.objects.filter(workload=workload_obj)
        if wkld_results.exists() is False:
            # Delete the workload
            workload_obj.delete()
            continue

        # Load knob & metric data for this workload
        knob_data = load_data_helper(pipeline_data, unique_workload,
                                     PipelineTaskType.KNOB_DATA)
        knob_data["data"], knob_data["columnlabels"] = clean_knob_data(
            knob_data["data"], knob_data["columnlabels"], newest_result.session)
        metric_data = load_data_helper(pipeline_data, unique_workload,
                                       PipelineTaskType.METRIC_DATA)
        X_matrix = np.array(knob_data["data"])
        y_matrix = np.array(metric_data["data"])
        rowlabels = np.array(knob_data["rowlabels"])
        assert np.array_equal(rowlabels, metric_data["rowlabels"])

        if not initialized:
            # For now set ranked knobs & pruned metrics to be those computed
            # for the first workload
            global_ranked_knobs = load_data_helper(
                pipeline_data, unique_workload,
                PipelineTaskType.RANKED_KNOBS)[:params['IMPORTANT_KNOB_NUMBER']]
            global_pruned_metrics = load_data_helper(
                pipeline_data, unique_workload, PipelineTaskType.PRUNED_METRICS)
            ranked_knob_idxs = [i for i in range(X_matrix.shape[1])
                                if X_columnlabels[i] in global_ranked_knobs]
            pruned_metric_idxs = [i for i in range(y_matrix.shape[1])
                                  if y_columnlabels[i] in global_pruned_metrics]

            # Filter X & y columnlabels by top ranked_knobs & pruned_metrics
            X_columnlabels = X_columnlabels[ranked_knob_idxs]
            y_columnlabels = y_columnlabels[pruned_metric_idxs]
            initialized = True

        # Filter X & y matrices by top ranked_knobs & pruned_metrics
        X_matrix = X_matrix[:, ranked_knob_idxs]
        y_matrix = y_matrix[:, pruned_metric_idxs]

        # Combine duplicate rows (rows with same knob settings)
        X_matrix, y_matrix, rowlabels = DataUtil.combine_duplicate_rows(
            X_matrix, y_matrix, rowlabels)

        workload_data[unique_workload] = {
            'X_matrix': X_matrix,
            'y_matrix': y_matrix,
            'rowlabels': rowlabels,
        }

    if len(workload_data) == 0:
        # The background task that aggregates the data has not finished running yet
        target_data.update(mapped_workload=None, scores=None)
        LOG.debug('%s: Skipping workload mapping because there is no parsed workload.\n',
                  AlgorithmType.name(algorithm))
        return target_data, algorithm

    # Stack all X & y matrices for preprocessing
    Xs = np.vstack([entry['X_matrix'] for entry in list(workload_data.values())])
    ys = np.vstack([entry['y_matrix'] for entry in list(workload_data.values())])

    # Scale the X & y values, then compute the deciles for each column in y
    X_scaler = StandardScaler(copy=False)
    X_scaler.fit(Xs)
    y_scaler = StandardScaler(copy=False)
    y_scaler.fit_transform(ys)
    y_binner = Bin(bin_start=1, axis=0)
    y_binner.fit(ys)
    del Xs
    del ys

    # Filter the target's X & y data by the ranked knobs & pruned metrics.
    X_target = target_data['X_matrix'][:, ranked_knob_idxs]
    y_target = target_data['y_matrix'][:, pruned_metric_idxs]

    # Now standardize the target's data and bin it by the deciles we just
    # calculated
    X_target = X_scaler.transform(X_target)
    y_target = y_scaler.transform(y_target)
    y_target = y_binner.transform(y_target)

    scores = {}
    for workload_id, workload_entry in list(workload_data.items()):
        predictions = np.empty_like(y_target)
        X_workload = workload_entry['X_matrix']
        X_scaled = X_scaler.transform(X_workload)
        y_workload = workload_entry['y_matrix']
        y_scaled = y_scaler.transform(y_workload)
        for j, y_col in enumerate(y_scaled.T):
            # Using this workload's data, train a Gaussian process model
            # and then predict the performance of each metric for each of
            # the knob configurations attempted so far by the target.
            y_col = y_col.reshape(-1, 1)
            if params['GPR_USE_GPFLOW']:
                model_kwargs = {
                    'lengthscales': params['GPR_LENGTH_SCALE'],
                    'variance': params['GPR_MAGNITUDE'],
                    'noise_variance': params['GPR_RIDGE'],
                }
                tf.reset_default_graph()
                graph = tf.get_default_graph()
                gpflow.reset_default_session(graph=graph)
                m = gpr_models.create_model(params['GPR_MODEL_NAME'],
                                            X=X_scaled, y=y_col, **model_kwargs)
                gpr_result = gpflow_predict(m.model, X_target)
            else:
                model = GPRNP(length_scale=params['GPR_LENGTH_SCALE'],
                              magnitude=params['GPR_MAGNITUDE'],
                              max_train_size=params['GPR_MAX_TRAIN_SIZE'],
                              batch_size=params['GPR_BATCH_SIZE'])
                model.fit(X_scaled, y_col, ridge=params['GPR_RIDGE'])
                gpr_result = model.predict(X_target)
            predictions[:, j] = gpr_result.ypreds.ravel()
        # Bin each of the predicted metric columns by deciles and then
        # compute the score (i.e., distance) between the target workload
        # and each of the known workloads
        predictions = y_binner.transform(predictions)
        dists = np.sqrt(np.sum(np.square(np.subtract(predictions, y_target)),
                               axis=1))
        scores[workload_id] = np.mean(dists)

    # Find the best (minimum) score
    best_score = np.inf
    best_workload_id = None
    best_workload_name = None
    scores_info = {}
    for workload_id, similarity_score in list(scores.items()):
        workload_name = Workload.objects.get(pk=workload_id).name
        if similarity_score < best_score:
            best_score = similarity_score
            best_workload_id = workload_id
            best_workload_name = workload_name
        scores_info[workload_id] = (workload_name, similarity_score)
    target_data.update(mapped_workload=(best_workload_id, best_workload_name,
                                        best_score),
                       scores=scores_info)
    LOG.debug('%s: Finished mapping the workload.\n\ndata=%s\n',
              AlgorithmType.name(algorithm),
              JSONUtil.dumps(target_data, pprint=True))
    save_execution_time(start_ts, "map_workload", newest_result)
    return target_data, algorithm
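# The per-workload score above is just the mean Euclidean distance between
# decile-binned predictions and the target's binned metrics. A self-contained
# sketch of that scoring step (the helper name is ours; np.percentile plus
# np.digitize stand in for the real Bin(bin_start=1, axis=0), which is fit on
# the pooled workload metrics, here passed as y_reference, not on y_target):
def mapping_score(predictions, y_target, y_reference):
    # Decile edges per metric column, computed from the reference matrix.
    edges = np.percentile(y_reference, np.arange(10, 100, 10), axis=0)

    def binned(matrix):
        # Map each column's values to its decile bin index.
        return np.column_stack([np.digitize(matrix[:, j], edges[:, j])
                                for j in range(matrix.shape[1])])

    dists = np.sqrt(np.sum(np.square(binned(predictions) - binned(y_target)),
                           axis=1))
    return np.mean(dists)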