Example #1
def __embed_ml_models(bm: benchmarks.Benchmark, mdl: model.Model, regressor,
                      classifier):
    backend = cplex_backend.CplexBackend()
    # Retrieve the decision variables: one x per benchmark variable, plus the
    # regressor output y
    x_vars = [
        mdl.get_var_by_name('x_{}'.format(i)) for i in range(bm.vars_number)
    ]
    y_var = mdl.get_var_by_name('y')
    # Convert the regressor to its EML representation and embed it in the
    # model, linking the x_vars (inputs) to y_var (output)
    reg_em = __eml_regressors[args.regressor_type](bm, regressor)
    nn_embed.encode(backend, reg_em, mdl, x_vars, y_var, 'regressor')

    class_var = mdl.get_var_by_name('class')
    # Convert and embed the classifier as well, linking x_vars to class_var
    cls_em = __eml_classifiers[args.classifier_type](bm, classifier)
    dt_embed.encode_backward_implications(backend, cls_em, mdl, x_vars,
                                          class_var, 'classifier')
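
The lookup tables __eml_regressors and __eml_classifiers are not shown in this excerpt. A plausible sketch, assuming they map command-line type names to builder functions such as __eml_regressor_nn from Example #2 (the keys and the classifier builder name are assumptions, not the original code):

# Hypothetical dispatch tables: map model type names to EML builder functions
__eml_regressors = {
    'nn': __eml_regressor_nn,
}
__eml_classifiers = {
    'dt': __eml_classifier_dt,  # hypothetical decision-tree builder
}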
Example #2
def __eml_regressor_nn(bm: benchmarks.Benchmark, regressor):
    # Convert the Keras network to the EML internal format
    regressor_em = keras_reader.read_keras_sequential(regressor)
    regressor_em.reset_bounds()
    # Bound the input neurons to the admissible bit-number range
    for neuron in regressor_em.layer(0).neurons():
        neuron.update_lb(args.min_bits_number)
        neuron.update_ub(args.max_bits_number)

    # Propagate bounds to the internal neurons via interval-based reasoning
    process.ibr_bounds(regressor_em)

    # For benchmarks with many variables, tighten the bounds further via MILP
    if bm.vars_number > __n_vars_bounds_tightening:
        bounds_backend = cplex_backend.CplexBackend()
        process.fwd_bound_tighthening(bounds_backend,
                                      regressor_em,
                                      timelimit=__bounds_opt_time_limit)

    return regressor_em
Example #3
def solve_opt_model(benchmark, mdl, trgt_error_ratio_log_exp, regr, mae, rmse,
        underest_ratio, classr, acc, prev_bit_sum, increase_tot_nbits,
        train_set_size, large_err_thresh, n_iter=-1, wrong_config=None,
        debug=True):
    # avoid a mutable default argument
    if wrong_config is None:
        wrong_config = []
    # default returned value when an existing model is reused
    new_nbit_sum = prev_bit_sum
    if mdl is None:
        if debug:
            print("\tMP model needs to be created or loaded")
        trgt_error_ratio_log_exp = math.ceil(trgt_error_ratio_log_exp)
        robust_param = global_robust_param

        # The requested target error (possibly combined with other
        # constraints) can be infeasible according to the prediction model,
        # because it may be greater than the largest error that can be
        # estimated (we are dealing with exponents here, hence the emphasis
        # on _greater_ values). The largest possible error attainable
        # according to the prediction model is:
        # max_error = pred_model(max_nbit, max_nbit, ..., max_nbit). If we
        # encounter this situation we force the optimizer to return the
        # config with max nbits for each var, since the opt problem would
        # otherwise be unsolvable
        max_conf = [max_nbit for i in range(benchmark_nVar[benchmark])]
        max_conf_dict = {}
        for i in range(len(max_conf)):
            max_conf_dict['var_{}'.format(i)] = [max_conf[i]]
        max_conf_df = pd.DataFrame.from_dict(max_conf_dict)
        max_predictable_error = regr.predict(max_conf_df)[0][0]
        if max_predictable_error < trgt_error_ratio_log_exp:
            dif = trgt_error_ratio_log_exp - max_predictable_error
            robust_param = -math.ceil(dif)
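            # Hypothetical illustration (numbers assumed, not from the
            # source): if the requested target is 12 but the regressor can
            # predict at most 9.4, then dif = 2.6 and robust_param = -3, so
            # the error constraint below becomes y >= 12 - 3 = 9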

        # Build a backend object
        bkd = cplex_backend.CplexBackend()

        # create MP model from scratch
        mdl = create_MP_model(benchmark, bkd, 
                trgt_error_ratio_log_exp, regr, mae, rmse, underest_ratio, 
                classr, acc, robust_param, large_err_thresh, debug)

        # the error constraint changes depending on the program input
        y_var = mdl.get_var_by_name('y')
        mdl.add_constraint(y_var >= trgt_error_ratio_log_exp + robust_param)

        sum_bit_var = mdl.get_var_by_name('sum_bit')
        if increase_tot_nbits:
            new_nbit_sum = math.ceil(prev_bit_sum + increase_step)
            mdl.add_constraint(sum_bit_var >= new_nbit_sum)
        else:
            mdl.add_constraint(sum_bit_var >= prev_bit_sum)
            new_nbit_sum = prev_bit_sum

    '''
    Cut previous solutions
    '''
    if n_iter > 0 and len(wrong_config) > 1:
        if debug:
            print("\t Cut previous solution")
        cut_solution(mdl, wrong_config, n_iter)

    '''
    Solve optimization model
    '''
    mdl.set_time_limit(opt_time_limit)
    if debug:
        print('=== Starting the solution process (Timelimit {}s)'.format(
            opt_time_limit))
    before_solve_time = time.time()

    sol = mdl.solve()
    after_solve_time = time.time()
    if debug:
        print("Time needed to solve MP & EML model {}".format(
            after_solve_time - before_solve_time))

    if sol is None:
        if debug:
            print('No solution found')
        opt_config = None
    else:
        opt_config = []
        if debug:
            print('=== Solution Data')
            print('Solution time: {:.2f} (sec)'.format(
                mdl.solve_details.time))
            print('Solver status: {}'.format(sol.solve_details.status))
        for i in range(benchmark_nVar[benchmark]):
            name = 'x_{}'.format(i)
            if debug:
                print('\t# Bits for {}: {}'.format(name, sol[name]))
            opt_config.append(int(sol[name]))
        if debug:
            print('\tY value: {}'.format(sol['y']))

    return opt_config, mdl, new_nbit_sum
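
cut_solution is not shown in these excerpts. Below is a minimal sketch of the no-good cut it might add, assuming the helper simply forbids the previously rejected configuration (the body is an assumption, not the original code):

def cut_solution(mdl, wrong_config, n_iter):
    # Hypothetical no-good cut (assumed behaviour): force at least one
    # variable to take a value different from the rejected configuration
    x_vars = [mdl.get_var_by_name('x_{}'.format(i))
              for i in range(len(wrong_config))]
    diffs = [mdl.abs(x - v) for x, v in zip(x_vars, wrong_config)]
    mdl.add_constraint(mdl.sum(diffs) >= 1, 'cut_{}'.format(n_iter))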
Example #4
def create_MP_model(benchmark, bkd, trgt_error_ratio_log_exp, regr, mae, rmse,
        underest_ratio, classr, acc, robust_param, large_err_thresh,
        debug=True):

    before_modelEM_time = time.time()
    '''
    Create optimization model
    '''
    # Build a docplex model
    mdl = cpx.Model()

    x_vars = []
    for i in range(benchmark_nVar[benchmark]):
        x_vars.append(mdl.integer_var(
            lb=min_nbit, ub=max_nbit, name='x_{}'.format(i)))

    # the output of the NN is a floating-point number, the negative log of
    # the error exponent; the upper bound is computed as a function of the
    # max target error exponent
    ub_y = -np.log(float('1e-' + str(max_target_error_exp)))
    # y might take negative values if the predicted error is very large;
    # artificially tighten the bounds of y_var to limit its range: the lower
    # bound corresponds to the neg log of the error threshold for large errors
    lb_y = -np.log(large_err_thresh)
    y_var = mdl.continuous_var(lb=lb_y, ub=ub_y, name='y')
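    # Hypothetical illustration (values assumed): with
    # max_target_error_exp = 10, ub_y = -log(1e-10) ~= 23.03; with
    # large_err_thresh = 100, lb_y = -log(100) ~= -4.61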

    lb_sum_var = min_nbit * benchmark_nVar[benchmark]
    ub_sum_var = max_nbit * benchmark_nVar[benchmark]
    sum_bit_var = mdl.integer_var(lb=lb_sum_var, ub=ub_sum_var, name='sum_bit')

    # c_var represents the config class (large or small error)
    if classr is not None:
        c_var = mdl.continuous_var(lb=0, ub=1, name='c')

    '''
    EML Regressor
    '''
    # convert the Keras NN to EML format
    regr_em = keras_reader.read_keras_sequential(regr)
    # Reset existing bounds (just to ensure idempotence)
    regr_em.reset_bounds()
    # Enforce basic input bounds
    in_layer = regr_em.layer(0)
    for neuron in in_layer.neurons():
        neuron.update_lb(min_nbit)
        neuron.update_ub(max_nbit)

    before_regrNN_propagate_time = time.time()

    # Compute bounds for the hidden neurons via Interval Based Reasoning
    nprocess.ibr_bounds(regr_em)

    # Tighten bounds via MILP (only for benchmarks with many vars)
    if benchmark_nVar[benchmark] > nvar_bounds_tightening:
        bounds_bkd = cplex_backend.CplexBackend()
        nprocess.fwd_bound_tighthening(bounds_bkd, regr_em, 
                timelimit=bounds_opt_time_limit)
        if debug:
            print("- Tightened bounds")

    after_regrNN_propagate_time = time.time()
    if debug:
        print("Time needed to compute bounds of NN regressor {0}".format(
            after_regrNN_propagate_time - before_regrNN_propagate_time))

    nembed.encode(bkd, regr_em, mdl, x_vars, y_var, 'regr_NN')

    '''
    EML Classifier
    '''
    if classr is not None:
        classr_em = sklearn_reader.read_sklearn_tree(classr)

        # the bounds for the DT attributes need to be manually specified
        for attr in classr_em.attributes():
            classr_em.update_ub(attr, max_nbit)
            classr_em.update_lb(attr, min_nbit)

        tembed.encode_backward_implications(bkd, classr_em, mdl, x_vars, 
                c_var, 'classr_DT')

        # Rule out configurations flagged by the classifier: the (predicted)
        # class of the config must be the small error class (value 0;
        # 1 --> large error), i.e. the predicted class must stay below the
        # threshold (e.g. 0.5)
        mdl.add_constraint(c_var <= classifier_threshold)

    # link the total bit count to the per-variable bit counts (needed by the
    # sum_bit constraints added in solve_opt_model even when no classifier
    # is given)
    mdl.add_constraint(sum_bit_var == sum(x_vars))

    # objective: minimize the total number of bits over all variables
    mdl.minimize(mdl.sum(x_vars))

    after_modelEM_time = time.time()
    if debug:
        print("Time needed to create MP model {}".format(
            after_modelEM_time - before_modelEM_time))

    return mdl
Example #5

# Imports (module paths assumed for the EML library, docplex and Keras)
import docplex.mp.model as cpx
from keras.models import model_from_json

from eml.backend import cplex_backend
from eml.net.embed import encode
from eml.net.reader import keras_reader


def load_keras_net():
    # Load the network architecture from JSON and its weights from HDF5
    with open('nn_reg.json') as f:
        knet = model_from_json(f.read())
    knet.load_weights('nn_reg.h5')
    return knet


def convert_keras_net(knet):
    net = keras_reader.read_keras_sequential(knet)
    return net


bkd = cplex_backend.CplexBackend()
mdl = cpx.Model()

knet = load_keras_net()
net = convert_keras_net(knet)

# create variables
X0_var = mdl.continuous_var(lb=0, ub=1, name='in_var0')
X1_var = mdl.continuous_var(lb=0, ub=1, name='in_var1')
Y_var = mdl.continuous_var(lb=0, ub=1, name='out_var')

# encode model
encode(bkd, net, mdl, [X0_var, X1_var], Y_var, 'net_encoding')

# create constraints
mdl.add_constraint(X0_var <= 0.2 * X1_var)
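
To complete the example, one might solve the model and inspect the values assigned to the encoded network's inputs and output (a minimal sketch; the time limit value is an assumption):

# solve and inspect the solution
mdl.set_time_limit(30)
sol = mdl.solve()
if sol is None:
    print('No solution found')
else:
    print('in_var0 = {}'.format(sol['in_var0']))
    print('in_var1 = {}'.format(sol['in_var1']))
    print('out_var = {}'.format(sol['out_var']))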