Example 1
def configuration_recommendation(recommendation_input):
    target_data, algorithm = recommendation_input
    LOG.info('configuration_recommendation called')
    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    session = newest_result.session
    params = JSONUtil.loads(session.hyperparameters)

    if target_data['bad'] is True:
        target_data_res = create_and_save_recommendation(
            recommended_knobs=target_data['config_recommend'],
            result=newest_result,
            status='bad',
            info='WARNING: no training data, the config is generated randomly',
            pipeline_run=target_data['pipeline_run'])
        LOG.debug('%s: Skipping configuration recommendation.\n\ndata=%s\n',
                  AlgorithmType.name(algorithm),
                  JSONUtil.dumps(target_data, pprint=True))
        return target_data_res

    X_columnlabels, X_scaler, X_scaled, y_scaled, X_max, X_min,\
        dummy_encoder, constraint_helper = combine_workload(target_data)

    # FIXME: we should generate more samples and use a smarter sampling
    # technique
    num_samples = params['NUM_SAMPLES']
    X_samples = np.empty((num_samples, X_scaled.shape[1]))
    for i in range(X_scaled.shape[1]):
        X_samples[:, i] = np.random.rand(num_samples) * (X_max[i] -
                                                         X_min[i]) + X_min[i]

    q = queue.PriorityQueue()
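    # Rank every observed configuration by its (scaled) objective value so
    # that the best ones can be reused below as extra starting points.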
    for x in range(0, y_scaled.shape[0]):
        q.put((y_scaled[x][0], x))

    i = 0
    while i < params['TOP_NUM_CONFIG']:
        try:
            item = q.get_nowait()
            # TensorFlow breaks if we use the training data points directly as
            # starting points for GPRGD, so we offset each one by a small bias
            # (GPR_EPS, default 0.001). If the starting point is already at
            # X_max, we subtract the bias instead so that it stays within the
            # valid range.
            dist = sum(np.square(X_max - X_scaled[item[1]]))
            if dist < 0.001:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] - abs(params['GPR_EPS'])))
            else:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] + abs(params['GPR_EPS'])))
            i = i + 1
        except queue.Empty:
            break

    res = None

    if algorithm == AlgorithmType.DNN:
        # neural network model
        model_nn = NeuralNet(n_input=X_samples.shape[1],
                             batch_size=X_samples.shape[0],
                             explore_iters=params['DNN_EXPLORE_ITER'],
                             noise_scale_begin=params['DNN_NOISE_SCALE_BEGIN'],
                             noise_scale_end=params['DNN_NOISE_SCALE_END'],
                             debug=params['DNN_DEBUG'],
                             debug_interval=params['DNN_DEBUG_INTERVAL'])
        if session.dnn_model is not None:
            model_nn.set_weights_bin(session.dnn_model)
        model_nn.fit(X_scaled, y_scaled, fit_epochs=params['DNN_TRAIN_ITER'])
        res = model_nn.recommend(X_samples,
                                 X_min,
                                 X_max,
                                 explore=params['DNN_EXPLORE'],
                                 recommend_epochs=params['DNN_GD_ITER'])
        session.dnn_model = model_nn.get_weights_bin()
        session.save()

    elif algorithm == AlgorithmType.GPR:
        # default gpr model
        if params['GPR_USE_GPFLOW']:
            model_kwargs = {}
            model_kwargs['model_learning_rate'] = params[
                'GPR_HP_LEARNING_RATE']
            model_kwargs['model_maxiter'] = params['GPR_HP_MAX_ITER']
            opt_kwargs = {}
            opt_kwargs['learning_rate'] = params['GPR_LEARNING_RATE']
            opt_kwargs['maxiter'] = params['GPR_MAX_ITER']
            opt_kwargs['bounds'] = [X_min, X_max]
            opt_kwargs['debug'] = params['GPR_DEBUG']
            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(
                params['GPR_UCB_BETA'],
                scale=params['GPR_UCB_SCALE'],
                t=i + 1.,
                ndim=X_scaled.shape[1])
            tf.reset_default_graph()
            graph = tf.get_default_graph()
            gpflow.reset_default_session(graph=graph)
            m = gpr_models.create_model(params['GPR_MODEL_NAME'],
                                        X=X_scaled,
                                        y=y_scaled,
                                        **model_kwargs)
            res = tf_optimize(m.model, X_samples, **opt_kwargs)
        else:
            model = GPRGD(length_scale=params['GPR_LENGTH_SCALE'],
                          magnitude=params['GPR_MAGNITUDE'],
                          max_train_size=params['GPR_MAX_TRAIN_SIZE'],
                          batch_size=params['GPR_BATCH_SIZE'],
                          num_threads=params['TF_NUM_THREADS'],
                          learning_rate=params['GPR_LEARNING_RATE'],
                          epsilon=params['GPR_EPSILON'],
                          max_iter=params['GPR_MAX_ITER'],
                          sigma_multiplier=params['GPR_SIGMA_MULTIPLIER'],
                          mu_multiplier=params['GPR_MU_MULTIPLIER'],
                          ridge=params['GPR_RIDGE'])
            model.fit(X_scaled, y_scaled, X_min, X_max)
            res = model.predict(X_samples, constraint_helper=constraint_helper)

    best_config_idx = np.argmin(res.minl.ravel())
    best_config = res.minl_conf[best_config_idx, :]
    best_config = X_scaler.inverse_transform(best_config)

    if ENABLE_DUMMY_ENCODER:
        # Decode one-hot encoding into categorical knobs
        best_config = dummy_encoder.inverse_transform(best_config)

    # Although we enforce max/min limits during GPRGD training, some precision
    # may be lost: e.g. 0.99...99 >= 1.0 may evaluate as True on the scaled
    # data, and once the scaled data are inverse-transformed the difference
    # becomes much larger and can no longer be ignored. Here we check the
    # range on the original data directly and make sure the recommended
    # config lies within it.
    X_min_inv = X_scaler.inverse_transform(X_min)
    X_max_inv = X_scaler.inverse_transform(X_max)
    best_config = np.minimum(best_config, X_max_inv)
    best_config = np.maximum(best_config, X_min_inv)

    conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
    conf_map_res = create_and_save_recommendation(
        recommended_knobs=conf_map,
        result=newest_result,
        status='good',
        info='INFO: training data size is {}'.format(X_scaled.shape[0]),
        pipeline_run=target_data['pipeline_run'])
    LOG.debug('%s: Finished selecting the next config.\n\ndata=%s\n',
              AlgorithmType.name(algorithm),
              JSONUtil.dumps(conf_map_res, pprint=True))

    return conf_map_res
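
The priority-queue block above seeds the optimizer with the best configurations observed so far, nudged by GPR_EPS so the starting points never coincide exactly with the training points (which breaks TensorFlow's gradient descent). A minimal, self-contained sketch of that seeding step on synthetic NumPy arrays — the 0.001 distance threshold mirrors the code above, while the function name and defaults are illustrative only:

import queue

import numpy as np


def seed_starting_points(X_scaled, y_scaled, X_samples, X_max,
                         top_num_config=10, gpr_eps=0.001):
    # Rank observed points by objective value (lowest = best).
    q = queue.PriorityQueue()
    for idx in range(y_scaled.shape[0]):
        q.put((y_scaled[idx][0], idx))

    picked = 0
    while picked < top_num_config:
        try:
            _, idx = q.get_nowait()
        except queue.Empty:
            break
        # Nudge the point away from the training data; flip the sign of the
        # nudge when the point already sits at the upper bound.
        dist = np.sum(np.square(X_max - X_scaled[idx]))
        bias = -abs(gpr_eps) if dist < 0.001 else abs(gpr_eps)
        X_samples = np.vstack((X_samples, X_scaled[idx] + bias))
        picked += 1
    return X_samples
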
Example 2
def configuration_recommendation(recommendation_input):
    target_data, algorithm = recommendation_input
    LOG.info('configuration_recommendation called')

    if target_data['bad'] is True:
        target_data_res = dict(
            status='bad',
            result_id=target_data['newest_result_id'],
            info='WARNING: no training data, the config is generated randomly',
            recommendation=target_data['config_recommend'],
            pipeline_run=target_data['pipeline_run'])
        LOG.debug('%s: Skipping configuration recommendation.\n\ndata=%s\n',
                  AlgorithmType.name(algorithm),
                  JSONUtil.dumps(target_data, pprint=True))
        return target_data_res

    # Load mapped workload data
    mapped_workload_id = target_data['mapped_workload'][0]

    latest_pipeline_run = PipelineRun.objects.get(
        pk=target_data['pipeline_run'])
    mapped_workload = Workload.objects.get(pk=mapped_workload_id)
    workload_knob_data = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.KNOB_DATA)
    workload_knob_data = JSONUtil.loads(workload_knob_data.data)
    workload_metric_data = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.METRIC_DATA)
    workload_metric_data = JSONUtil.loads(workload_metric_data.data)

    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    cleaned_workload_knob_data = clean_knob_data(
        workload_knob_data["data"], workload_knob_data["columnlabels"],
        newest_result.session)

    X_workload = np.array(cleaned_workload_knob_data[0])
    X_columnlabels = np.array(cleaned_workload_knob_data[1])
    y_workload = np.array(workload_metric_data['data'])
    y_columnlabels = np.array(workload_metric_data['columnlabels'])
    rowlabels_workload = np.array(workload_metric_data['rowlabels'])

    # Target workload data
    newest_result = Result.objects.get(pk=target_data['newest_result_id'])
    X_target = target_data['X_matrix']
    y_target = target_data['y_matrix']
    rowlabels_target = np.array(target_data['rowlabels'])

    if not np.array_equal(X_columnlabels, target_data['X_columnlabels']):
        raise Exception(('The workload and target data should have '
                         'identical X columnlabels (sorted knob names)'))
    if not np.array_equal(y_columnlabels, target_data['y_columnlabels']):
        raise Exception(('The workload and target data should have '
                         'identical y columnlabels (sorted metric names)'))

    # Filter Xs by top 10 ranked knobs
    ranked_knobs = PipelineData.objects.get(
        pipeline_run=latest_pipeline_run,
        workload=mapped_workload,
        task_type=PipelineTaskType.RANKED_KNOBS)
    ranked_knobs = JSONUtil.loads(ranked_knobs.data)[:IMPORTANT_KNOB_NUMBER]
    ranked_knob_idxs = [
        i for i, cl in enumerate(X_columnlabels) if cl in ranked_knobs
    ]
    X_workload = X_workload[:, ranked_knob_idxs]
    X_target = X_target[:, ranked_knob_idxs]
    X_columnlabels = X_columnlabels[ranked_knob_idxs]

    # Filter ys by current target objective metric
    target_objective = newest_result.session.target_objective
    target_obj_idx = [
        i for i, cl in enumerate(y_columnlabels) if cl == target_objective
    ]
    if len(target_obj_idx) == 0:
        raise Exception(('Could not find target objective in metrics '
                         '(target_obj={})').format(target_objective))
    elif len(target_obj_idx) > 1:
        raise Exception(
            ('Found {} instances of target objective in '
             'metrics (target_obj={})').format(len(target_obj_idx),
                                               target_objective))

    metric_meta = db.target_objectives.get_metric_metadata(
        newest_result.session.dbms.pk, newest_result.session.target_objective)
    lessisbetter = metric_meta[
        target_objective].improvement == db.target_objectives.LESS_IS_BETTER

    y_workload = y_workload[:, target_obj_idx]
    y_target = y_target[:, target_obj_idx]
    y_columnlabels = y_columnlabels[target_obj_idx]

    # Combine duplicate rows in the target/workload data (separately)
    X_workload, y_workload, rowlabels_workload = DataUtil.combine_duplicate_rows(
        X_workload, y_workload, rowlabels_workload)
    X_target, y_target, rowlabels_target = DataUtil.combine_duplicate_rows(
        X_target, y_target, rowlabels_target)

    # Delete any rows that appear in both the workload data and the target
    # data from the workload data
    dups_filter = np.ones(X_workload.shape[0], dtype=bool)
    target_row_tups = [tuple(row) for row in X_target]
    for i, row in enumerate(X_workload):
        if tuple(row) in target_row_tups:
            dups_filter[i] = False
    X_workload = X_workload[dups_filter, :]
    y_workload = y_workload[dups_filter, :]
    rowlabels_workload = rowlabels_workload[dups_filter]

    # Combine target & workload Xs for preprocessing
    X_matrix = np.vstack([X_target, X_workload])

    # Dummy-encode categorical variables
    categorical_info = DataUtil.dummy_encoder_helper(X_columnlabels,
                                                     mapped_workload.dbms)
    dummy_encoder = DummyEncoder(categorical_info['n_values'],
                                 categorical_info['categorical_features'],
                                 categorical_info['cat_columnlabels'],
                                 categorical_info['noncat_columnlabels'])
    X_matrix = dummy_encoder.fit_transform(X_matrix)

    # The two variables below are needed to correctly determine the max/min
    # values of the dummy-encoded columns
    binary_index_set = set(categorical_info['binary_vars'])
    total_dummies = dummy_encoder.total_dummies()

    # Scale to N(0, 1)
    X_scaler = StandardScaler()
    X_scaled = X_scaler.fit_transform(X_matrix)
    if y_target.shape[0] < 5:  # FIXME
        # FIXME (dva): if there are fewer than 5 target results so far
        # then scale the y values (metrics) using the workload's
        # y_scaler. I'm not sure if 5 is the right cutoff.
        y_target_scaler = None
        y_workload_scaler = StandardScaler()
        y_matrix = np.vstack([y_target, y_workload])
        y_scaled = y_workload_scaler.fit_transform(y_matrix)
    else:
        # FIXME (dva): otherwise try to compute a separate y_scaler for
        # the target and scale them separately.
        try:
            y_target_scaler = StandardScaler()
            y_workload_scaler = StandardScaler()
            y_target_scaled = y_target_scaler.fit_transform(y_target)
            y_workload_scaled = y_workload_scaler.fit_transform(y_workload)
            y_scaled = np.vstack([y_target_scaled, y_workload_scaled])
        except ValueError:
            y_target_scaler = None
            y_workload_scaler = StandardScaler()
            y_scaled = y_workload_scaler.fit_transform(y_target)

    # Set up constraint helper
    constraint_helper = ParamConstraintHelper(
        scaler=X_scaler,
        encoder=dummy_encoder,
        binary_vars=categorical_info['binary_vars'],
        init_flip_prob=INIT_FLIP_PROB,
        flip_prob_decay=FLIP_PROB_DECAY)

    # FIXME (dva): check if these are good values for the ridge
    # ridge = np.empty(X_scaled.shape[0])
    # ridge[:X_target.shape[0]] = 0.01
    # ridge[X_target.shape[0]:] = 0.1

    # FIXME: we should generate more samples and use a smarter sampling
    # technique
    num_samples = NUM_SAMPLES
    X_samples = np.empty((num_samples, X_scaled.shape[1]))
    X_min = np.empty(X_scaled.shape[1])
    X_max = np.empty(X_scaled.shape[1])
    X_scaler_matrix = np.zeros([1, X_scaled.shape[1]])

    session_knobs = SessionKnob.objects.get_knobs_for_session(
        newest_result.session)

    # Set min/max for knob values
    for i in range(X_scaled.shape[1]):
        if i < total_dummies or i in binary_index_set:
            col_min = 0
            col_max = 1
        else:
            col_min = X_scaled[:, i].min()
            col_max = X_scaled[:, i].max()
            for knob in session_knobs:
                if X_columnlabels[i] == knob["name"]:
                    X_scaler_matrix[0][i] = knob["minval"]
                    col_min = X_scaler.transform(X_scaler_matrix)[0][i]
                    X_scaler_matrix[0][i] = knob["maxval"]
                    col_max = X_scaler.transform(X_scaler_matrix)[0][i]
        X_min[i] = col_min
        X_max[i] = col_max
        X_samples[:, i] = np.random.rand(num_samples) * (col_max -
                                                         col_min) + col_min

    # When more is better (e.g. throughput), negate the metric so that
    # gradient descent can minimize it.
    if not lessisbetter:
        y_scaled = -y_scaled

    q = queue.PriorityQueue()
    for x in range(0, y_scaled.shape[0]):
        q.put((y_scaled[x][0], x))

    i = 0
    while i < TOP_NUM_CONFIG:
        try:
            item = q.get_nowait()
            # TensorFlow breaks if we use the training data points directly as
            # starting points for GPRGD, so we offset each one by a small bias
            # (GPR_EPS, default 0.001). If the starting point is already at
            # X_max, we subtract the bias instead so that it stays within the
            # valid range.
            dist = sum(np.square(X_max - X_scaled[item[1]]))
            if dist < 0.001:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] - abs(GPR_EPS)))
            else:
                X_samples = np.vstack(
                    (X_samples, X_scaled[item[1]] + abs(GPR_EPS)))
            i = i + 1
        except queue.Empty:
            break

    session = newest_result.session
    res = None

    if algorithm == AlgorithmType.DNN:
        # neural network model
        model_nn = NeuralNet(n_input=X_samples.shape[1],
                             batch_size=X_samples.shape[0],
                             explore_iters=DNN_EXPLORE_ITER,
                             noise_scale_begin=DNN_NOISE_SCALE_BEGIN,
                             noise_scale_end=DNN_NOISE_SCALE_END,
                             debug=DNN_DEBUG,
                             debug_interval=DNN_DEBUG_INTERVAL)
        if session.dnn_model is not None:
            model_nn.set_weights_bin(session.dnn_model)
        model_nn.fit(X_scaled, y_scaled, fit_epochs=DNN_TRAIN_ITER)
        res = model_nn.recommend(X_samples,
                                 X_min,
                                 X_max,
                                 explore=DNN_EXPLORE,
                                 recommend_epochs=MAX_ITER)
        session.dnn_model = model_nn.get_weights_bin()
        session.save()

    elif algorithm == AlgorithmType.GPR:
        # default gpr model
        if USE_GPFLOW:
            model_kwargs = {}
            model_kwargs['model_learning_rate'] = HP_LEARNING_RATE
            model_kwargs['model_maxiter'] = HP_MAX_ITER
            opt_kwargs = {}
            opt_kwargs['learning_rate'] = DEFAULT_LEARNING_RATE
            opt_kwargs['maxiter'] = MAX_ITER
            opt_kwargs['bounds'] = [X_min, X_max]
            ucb_beta = 'get_beta_td'
            opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(ucb_beta,
                                                      scale=DEFAULT_UCB_SCALE,
                                                      t=i + 1.,
                                                      ndim=X_scaled.shape[1])
            tf.reset_default_graph()
            graph = tf.get_default_graph()
            gpflow.reset_default_session(graph=graph)
            m = gpr_models.create_model('BasicGP',
                                        X=X_scaled,
                                        y=y_scaled,
                                        **model_kwargs)
            res = tf_optimize(m.model, X_samples, **opt_kwargs)
        else:
            model = GPRGD(length_scale=DEFAULT_LENGTH_SCALE,
                          magnitude=DEFAULT_MAGNITUDE,
                          max_train_size=MAX_TRAIN_SIZE,
                          batch_size=BATCH_SIZE,
                          num_threads=NUM_THREADS,
                          learning_rate=DEFAULT_LEARNING_RATE,
                          epsilon=DEFAULT_EPSILON,
                          max_iter=MAX_ITER,
                          sigma_multiplier=DEFAULT_SIGMA_MULTIPLIER,
                          mu_multiplier=DEFAULT_MU_MULTIPLIER)
            model.fit(X_scaled, y_scaled, X_min, X_max, ridge=DEFAULT_RIDGE)
            res = model.predict(X_samples, constraint_helper=constraint_helper)

    best_config_idx = np.argmin(res.minl.ravel())
    best_config = res.minl_conf[best_config_idx, :]
    best_config = X_scaler.inverse_transform(best_config)
    # Decode one-hot encoding into categorical knobs
    best_config = dummy_encoder.inverse_transform(best_config)

    # Although we enforce max/min limits during GPRGD training, some precision
    # may be lost: e.g. 0.99...99 >= 1.0 may evaluate as True on the scaled
    # data, and once the scaled data are inverse-transformed the difference
    # becomes much larger and can no longer be ignored. Here we check the
    # range on the original data directly and make sure the recommended
    # config lies within it.
    X_min_inv = X_scaler.inverse_transform(X_min)
    X_max_inv = X_scaler.inverse_transform(X_max)
    best_config = np.minimum(best_config, X_max_inv)
    best_config = np.maximum(best_config, X_min_inv)

    conf_map = {k: best_config[i] for i, k in enumerate(X_columnlabels)}
    conf_map_res = dict(status='good',
                        result_id=target_data['newest_result_id'],
                        recommendation=conf_map,
                        info='INFO: training data size is {}'.format(
                            X_scaled.shape[0]),
                        pipeline_run=latest_pipeline_run.pk)
    LOG.debug('%s: Finished selecting the next config.\n\ndata=%s\n',
              AlgorithmType.name(algorithm),
              JSONUtil.dumps(conf_map_res, pprint=True))

    return conf_map_res
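
Both examples end with the same safeguard: clip the recommendation against the knob bounds on the original scale, because inverse-transforming the scaled bounds can reintroduce out-of-range values. A small stand-alone sketch of that step using scikit-learn's StandardScaler (synthetic data; the reshaping to 2-D is only there because StandardScaler operates on matrices):

import numpy as np
from sklearn.preprocessing import StandardScaler

# Fit a scaler on a few knob settings and pretend the optimizer returned a
# scaled config that slightly overshoots the upper bound.
X = np.array([[1.0, 100.0], [2.0, 200.0], [3.0, 300.0]])
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_min, X_max = X_scaled.min(axis=0), X_scaled.max(axis=0)
best_scaled = X_max + 1e-9

# Inverse-transform the config and the bounds, then clip on the original
# scale so the recommended values never leave the true knob range.
best_config = scaler.inverse_transform(best_scaled.reshape(1, -1))[0]
lo = scaler.inverse_transform(X_min.reshape(1, -1))[0]
hi = scaler.inverse_transform(X_max.reshape(1, -1))[0]
best_config = np.maximum(np.minimum(best_config, hi), lo)
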
Example 3
def gpr_new(env, config, n_loops=100):
    model_name = 'BasicGP'
    model_opt_frequency = 0
    model_kwargs = {}
    model_kwargs['model_learning_rate'] = 0.001
    model_kwargs['model_maxiter'] = 5000
    opt_kwargs = {}
    opt_kwargs['learning_rate'] = 0.01
    opt_kwargs['maxiter'] = 500

    results = []
    x_axis = []
    memory = ReplayMemory()
    num_samples = config['num_samples']
    num_collections = config['num_collections']
    X_min = np.zeros(env.knob_dim)
    X_max = np.ones(env.knob_dim)
    X_bounds = [X_min, X_max]
    opt_kwargs['bounds'] = X_bounds

    for _ in range(num_collections):
        action = np.random.rand(env.knob_dim)
        reward, _ = env.simulate(action)
        memory.push(action, reward)

    for i in range(n_loops):
        X_samples = np.random.rand(num_samples, env.knob_dim)
        if i >= 5:
            actions, rewards = memory.get_all()
            tuples = tuple(zip(actions, rewards))
            top10 = heapq.nlargest(10, tuples, key=lambda e: e[1])
            for entry in top10:
                # TensorFlow breaks if we use the training data points directly
                # as starting points for GPRGD, so rescale and shift each top
                # action slightly (x * 0.97 + 0.01 keeps it strictly inside
                # the [0, 1] knob range).
                X_samples = np.vstack(
                    (X_samples, np.array(entry[0]) * 0.97 + 0.01))

        actions, rewards = memory.get_all()

        ucb_beta = config['beta']
        opt_kwargs['ucb_beta'] = ucb.get_ucb_beta(ucb_beta,
                                                  scale=config['scale'],
                                                  t=i + 1.,
                                                  ndim=env.knob_dim)
        if model_opt_frequency > 0:
            optimize_hyperparams = i % model_opt_frequency == 0
            if not optimize_hyperparams:
                model_kwargs['hyperparameters'] = hyperparameters
        else:
            optimize_hyperparams = False
            model_kwargs['hyperparameters'] = None
        model_kwargs['optimize_hyperparameters'] = optimize_hyperparams
        X_new, ypred, _, hyperparameters = run_optimize(
            np.array(actions), -np.array(rewards), X_samples, model_name,
            opt_kwargs, model_kwargs)

        sort_index = np.argsort(ypred.squeeze())
        X_new = X_new[sort_index]
        ypred = ypred[sort_index].squeeze()
        action = X_new[0]
        reward, _ = env.simulate(action)
        memory.push(action, reward)
        LOG.info('loop: %d reward: %f', i, reward[0])
        results.append(reward)
        x_axis.append(i + 1)

    return np.array(results), np.array(x_axis)
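
ReplayMemory is a project class that is not shown here; from its usage in gpr_new (push(action, reward) and get_all()) a minimal stand-in could look like the following. This is only a guess at the interface for experimentation, not the actual OtterTune implementation:

class ReplayMemory:
    """Minimal stand-in matching the push/get_all usage in gpr_new above."""

    def __init__(self):
        self._actions = []
        self._rewards = []

    def push(self, action, reward):
        # Store one observed (configuration, reward) pair.
        self._actions.append(action)
        self._rewards.append(reward)

    def get_all(self):
        # Return everything observed so far as two parallel lists.
        return self._actions, self._rewards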