Example #1

# Imports assumed for this example (the classes used below come from IDAES PySMO;
# adjust the module paths to match your installed IDAES version).
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from idaes.surrogate.pysmo import sampling as sp
from idaes.surrogate.pysmo import kriging as krg

def main():
    # Load the necessary data
    current_path = os.path.dirname(os.path.realpath(__file__))
    data = pd.read_csv(os.path.join(current_path, 'data_files',
                                    'six_hump_data_2400.txt'),
                       sep='\s+',
                       header=None,
                       index_col=None)

    # Scale the features only - optional, since scaling is also performed internally
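    # Min-max scaling maps each input column to [0, 1]: x_scaled = (x - x_min) / (x_max - x_min)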
    sd = sp.FeatureScaling()
    data_scaled_x, data_min, data_max = sd.data_scaling_minmax(
        data.values[:, :-1])
    y_r = data.values[:, -1]
    data_scaled = np.concatenate((data_scaled_x, y_r.reshape(y_r.shape[0], 1)),
                                 axis=1)

    # Select 50 samples for Kriging training
    no_training_samples = 50
    b = sp.HammersleySampling(data_scaled, no_training_samples, 'selection')
    training_data = b.sample_points()

    # Kriging training
    aa = krg.KrigingModel(training_data,
                          numerical_gradients=False,
                          overwrite=True)
    fv = aa.get_feature_vector()
    ab = aa.training()

    # Print Pyomo expression from input variables
    list_vars = []
    for i in fv.keys():
        list_vars.append(fv[i])
    print('The Kriging expression is: \n eq = ',
          aa.generate_expression(list_vars))

    # Evaluate the Kriging model over the full dataset (including points not used in training) and calculate R^2
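    # R^2 = 1 - SS_res / SS_tot, computed here over all rows of the scaled dataset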
    x_pred = data_scaled[:, :-1]
    y_pred = aa.predict_output(x_pred)
    r2 = aa.r2_calculation(data_scaled[:, -1], y_pred)
    print('The R^2 value is: ', r2)

    # 3D error (deviation) plot
    difference_vector = data_scaled[:, 2] - y_pred[:, 0]
    x1 = np.linspace(-3, 3, 61)
    x2 = np.linspace(-2, 2, 41)
    X1, X2 = np.meshgrid(
        x1, x2,
        indexing='ij')  # 'ij' indexing gives matrix (row-major) ordering, matching the data layout
    Y = difference_vector.reshape(61, 41)
    ax = plt.axes(projection='3d')
    ax.plot_surface(X1, X2, Y, cmap='viridis', edgecolor='none')
    # ax.scatter3D(training_data[:, 0], training_data[:, 1], training_data[:, 2]-y_training_pred[:, 0], c='r', marker='^', s=200, depthshade=False)
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('Error')
    plt.show()
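A minimal usage sketch (not part of the original example), assuming Pyomo is installed and that generate_expression also accepts a list of user-supplied Pyomo variables in feature order; the hypothetical snippet below would sit at the end of main() above, where aa is the trained KrigingModel:

    from pyomo.environ import ConcreteModel, Var, Objective, minimize

    # Embed the Kriging surrogate as the objective of a small NLP (illustrative only)
    m = ConcreteModel()
    m.x1 = Var(bounds=(0, 1))  # the features were min-max scaled to [0, 1]
    m.x2 = Var(bounds=(0, 1))
    m.obj = Objective(expr=aa.generate_expression([m.x1, m.x2]), sense=minimize)
    # The model can then be handed to any NLP solver, e.g. SolverFactory('ipopt').solve(m)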
Example #2

# Imports assumed for this example (adjust module paths for your IDAES version).
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from idaes.surrogate.pysmo import sampling as sp
from idaes.surrogate.pysmo.radial_basis_function import RadialBasisFunctions

def main():
    # Load the necessary data
    current_path = os.path.dirname(os.path.realpath(__file__))
    data = pd.read_csv(os.path.join(current_path, 'data_files',
                                    'griewank_data.txt'),
                       sep='\s+',
                       header=None,
                       index_col=None)

    # Scale the data and select 310 sample points via Hammersley sampling
    sd = sp.FeatureScaling()
    data_scaled, data_min, data_max = sd.data_scaling_minmax(data)
    no_training_samples = 310
    b = sp.HammersleySampling(data_scaled,
                              no_training_samples,
                              sampling_type='selection')
    training_data = b.sample_points()

    # Fit an RBF model to 310 selected points
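    # The Gaussian basis has the form phi(r) = exp(-(c*r)^2); solution_method='pyomo' fits the RBF
    # weights by solving a Pyomo model, and regularization=True adds a regularization term to stabilise the fit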
    f1 = RadialBasisFunctions(training_data,
                              basis_function='gaussian',
                              solution_method='pyomo',
                              regularization=True,
                              overwrite=True)
    p = f1.get_feature_vector()
    f1.training()

    # Predict outputs over the full scaled dataset (training points included) and evaluate R^2
    y_predicted_pyomo = f1.predict_output(data_scaled[:, :-1])
    r2_pyomo = f1.r2_calculation(data_scaled[:, -1], y_predicted_pyomo)
    print(r2_pyomo)

    # Print RBF expression based on headers of input data
    list_vars = []
    for i in p.keys():
        list_vars.append(p[i])
    print('\nThe RBF expression is: \n', f1.generate_expression(list_vars))

    # 3-D surface of the unscaled model predictions over the 101 x 101 grid (10,201 points)
    yy = sp.FeatureScaling.data_unscaling_minmax(y_predicted_pyomo,
                                                 data_min[0, 2], data_max[0, 2])
    x1 = np.linspace(-20, 20, 101)
    x2 = np.linspace(-20, 20, 101)
    X1, X2 = np.meshgrid(x1, x2)
    Y = yy.reshape(101, 101)
    ax = plt.axes(projection='3d')
    ax.plot_surface(X2, X1, Y, cmap='viridis', edgecolor='none')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')
    ax.set_zlabel('y')
    plt.show()

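    # Parity plot: 45-degree reference line plus predicted vs. actual scaled outputs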
    plt.plot(data_scaled[:, 2], data_scaled[:, 2], '-', data_scaled[:, 2],
             y_predicted_pyomo, 'o')
    plt.show()
Example #3

# Imports as in Example #2.
def main():
    # Load the necessary data
    current_path = os.path.dirname(os.path.realpath(__file__))
    data = pd.read_csv(os.path.join(current_path, 'data_files',
                                    'three_humpback_data_v4.csv'),
                       header=0,
                       index_col=0)

    # Scale the data and select 100 sampling points via Hammersley sampling
    sd = sp.FeatureScaling()
    data_scaled, data_min, data_max = sd.data_scaling_minmax(data)
    no_training_samples = 100
    b = sp.HammersleySampling(data_scaled, no_training_samples, 'selection')
    training_data = b.sample_points()

    # Fit an RBF model
    f1 = RadialBasisFunctions(training_data,
                              basis_function='gaussian',
                              solution_method='pyomo',
                              regularization=True,
                              overwrite=True)
    p = f1.get_feature_vector()
    f1.training()

    # Predict outputs over the full scaled dataset (training points included) and evaluate R^2
    y_predicted_pyomo = f1.predict_output(data_scaled[:, :-1])
    r2_pyomo = f1.r2_calculation(data_scaled[:, -1], y_predicted_pyomo)
    print('The R^2 value over all 10,201 data points is:', r2_pyomo)

    # Print RBF expression based on headers of input data
    list_vars = []
    for i in p.keys():
        list_vars.append(p[i])
    print('\nThe RBF expression is: \n', f1.generate_expression(list_vars))

    # Contour plots for surrogate and actual function
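    # Both reshape(101, 101) calls assume the loaded data lie on a regular 101 x 101 grid in row-major order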
    fig, (ax1, ax2) = plt.subplots(ncols=2)
    img1 = ax1.contourf(np.linspace(0, 1, 101),
                        np.linspace(0, 1, 101),
                        y_predicted_pyomo.reshape(101, 101),
                        levels=100,
                        cmap='tab20')
    ax1.grid()
    ax1.set_title('Surrogate')
    fig.colorbar(img1, ax=ax1)
    img2 = ax2.contourf(np.linspace(0, 1, 101),
                        np.linspace(0, 1, 101),
                        data_scaled[:, 2].reshape(101, 101),
                        levels=100,
                        cmap='tab20')
    ax2.grid()
    ax2.set_title('Real')
    fig.colorbar(img2, ax=ax2)
    plt.show()
Example #4

# Imports assumed for this example (adjust module paths for your IDAES version).
import os
import pandas as pd
from matplotlib import pyplot as plt
from idaes.surrogate.pysmo import sampling as sp

def main():
    current_path = os.path.dirname(os.path.realpath(__file__))
    data = pd.read_csv(os.path.join(current_path, 'data_files', 'six_hump_function_data.tab'), sep='\t', header=0, index_col=0)
    bounds_list = [[-1, -1], [1, 1]]
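    # For sampling_type='creation' the bounds are given as [[lower bounds], [upper bounds]], one entry per input;
    # 'selection' draws points from the existing dataset, while 'creation' generates new points within the bounds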

    b1 = sp.LatinHypercubeSampling(data, 100, sampling_type='selection')
    td11 = b1.sample_points()

    b2 = sp.LatinHypercubeSampling(bounds_list, 100, sampling_type='creation')
    td12 = b2.sample_points()

    c1 = sp.HaltonSampling(data, 100, sampling_type='selection')
    td21 = c1.sample_points()

    c2 = sp.HaltonSampling(bounds_list, 100, sampling_type='creation')
    td22 = c2.sample_points()

    d1 = sp.HammersleySampling(data, 100, sampling_type='selection')
    td31 = d1.sample_points()

    d2 = sp.HammersleySampling(bounds_list, 100, sampling_type='creation')
    td32 = d2.sample_points()

    e1 = sp.CVTSampling(data, 100, tolerance=1e-6, sampling_type='selection')
    td41 = e1.sample_points()

    e2 = sp.CVTSampling(bounds_list, 100, tolerance=1e-6, sampling_type='creation')
    td42 = e2.sample_points()

    f1 = sp.UniformSampling(data, [10, 10], 'selection')
    td51 = f1.sample_points()

    f2 = sp.UniformSampling(bounds_list, [10, 10], 'creation')
    td52 = f2.sample_points()

    g1 = sp.UniformSampling(data, [10, 10], 'selection', False)
    td61 = g1.sample_points()

    g2 = sp.UniformSampling(bounds_list, [10, 10], 'creation', False)
    td62 = g2.sample_points()

    ax1 = plt.subplot(3, 4, 1)
    ax1.plot(td11.values[:, 0], td11.values[:, 1], 'o')
    ax1.grid()
    ax1.set_title('LHS - selection')

    ax2 = plt.subplot(3, 4, 2)
    ax2.plot(td12[:, 0], td12[:, 1], 'o')
    ax2.grid()
    ax2.set_title('LHS - creation')

    ax3 = plt.subplot(3, 4, 3)
    ax3.plot(td21.values[:, 0], td21.values[:, 1], 'o')
    ax3.grid()
    ax3.set_title('Halton - selection')

    ax4 = plt.subplot(3, 4, 4)
    ax4.plot(td22[:, 0], td22[:, 1], 'o')
    ax4.grid()
    ax4.set_title('Halton - creation')

    ax5 = plt.subplot(3, 4, 5)
    ax5.plot(td31.values[:, 0], td31.values[:, 1], 'o')
    ax5.grid()
    ax5.set_title('Hammersley - selection')

    ax6 = plt.subplot(3, 4, 6)
    ax6.plot(td32[:, 0], td32[:, 1], 'o')
    ax6.grid()
    ax6.set_title('Hammersley - creation')

    ax7 = plt.subplot(3, 4, 7)
    ax7.plot(td41.values[:, 0], td41.values[:, 1], 'o')
    ax7.grid()
    ax7.set_title('CVT - selection')

    ax8 = plt.subplot(3, 4, 8)
    ax8.plot(td42[:, 0], td42[:, 1], 'o')
    ax8.grid()
    ax8.set_title('CVT - creation')

    ax9 = plt.subplot(3, 4, 9)
    ax9.plot(td51.values[:, 0], td51.values[:, 1], 'o')
    ax9.grid()
    ax9.set_title('Uniform (edges) - selection')

    ax10 = plt.subplot(3, 4, 10)
    ax10.plot(td52[:, 0], td52[:, 1], 'o')
    ax10.grid()
    ax10.set_title('Uniform (edges) - creation')

    ax11 = plt.subplot(3, 4, 11)
    ax11.plot(td61.values[:, 0], td61.values[:, 1], 'o')
    ax11.grid()
    ax11.set_title('Uniform (centres) - selection')

    ax12 = plt.subplot(3, 4, 12)
    ax12.plot(td62[:, 0], td62[:, 1], 'o')
    ax12.grid()
    ax12.set_title('Uniform (centres) - creation')

    plt.show()
Example #5

def buildROM(self, x, radius_base):
    """
    ``buildROM`` generates the surrogate models r(w) for the external functions

    :param x: Current point around which the surrogate must be generated
    :param radius_base: Sample radius. The sample points for surrogate generation are drawn between (x - radius_base) and (x + radius_base)
    :return: a number of surrogate-related objects, including:
             surrogate_objects: objects containing the surrogate expressions
             surrogate_objective: objective function of the compatibility problem, ||y - r(w)||
             surrogate_constraints: surrogate-related constraints for the TRSPk and criticality problems, y - r(w)
             y1: true output values of the black box at x
    """

    y1 = self.evaluateDx(x)
    rom_params = []

    if self.romtype in (ROMType.linear0, ROMType.linear1, ROMType.quadratic, ROMType.kriging):
        # Trap all print to screens from sampling and PR scripts
        text_trap = io.StringIO()
        sys.stdout = text_trap

        # Create samples
        radius = radius_base  # * scale[j]
        x_lo = x - radius
        x_up = x + radius

        list_of_surrogates = []  # Will contain the surrogate parameters
        y_surrogates = []  # Will contain the output predictions from the surrogates when required
        surrogate_expressions = []  # Will contain the list of surrogate expressions
        surrogate_objects = []

        # For all external functions (verify!):
        for k in range(0, self.ly):
            surrogate_expressions.append([])
            surrogate_objects.append([])
            x_rel = []
            x_lo_rel = []
            x_up_rel = []
            for j in range(0, len(self.exfn_xvars_ind[k])):
                x_rel.append(x[self.exfn_xvars_ind[k][j]])
                x_lo_rel.append(x_lo[self.exfn_xvars_ind[k][j]])
                x_up_rel.append(x_up[self.exfn_xvars_ind[k][j]])
            x_bounds = [x_lo_rel, x_up_rel]

            #############################################################
            # Fix fraction for training and calculate number of samples based on number of features
            tr_split = 0.8
            if self.romtype == ROMType.linear0:
                num_sp = int(np.around(((len(x_lo_rel) + 1) * (1 / tr_split)))) 
            elif self.romtype == ROMType.linear1:
                num_sp = int(np.around(((0.5 * (len(x_lo_rel) + len(x_lo_rel) ** 2) + 1) * (1 / tr_split))))
            elif self.romtype == ROMType.quadratic:
                num_sp = int(np.around(((0.5 * (3 * len(x_lo_rel) + len(x_lo_rel) ** 2) + 1) * (1 / tr_split))))
            elif self.romtype == ROMType.kriging:
                num_sp = 25  # len(x_lo_rel) + 3 # number of features + s.d + mean + reg_param
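            # Worked example: with 2 inputs and tr_split = 0.8, linear0 needs round(3 / 0.8) = 4 samples,
            # linear1 round((0.5 * (2 + 4) + 1) / 0.8) = 5 and quadratic round((0.5 * (6 + 4) + 1) / 0.8) = 8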

            # # Calculate number of samples as twice the number of features
            # tr_split = 0.8
            # if self.romtype == ROMType.linear:
            #     num_sp = int((len(x_lo_rel) + 1) * 2) - 2
            # elif self.romtype == ROMType.quadratic:
            #     num_sp = int(np.around(((0.5 * (3 * len(x_lo_rel) + len(x_lo_rel) ** 2) + 1) * (2)))) - 2
            # elif self.romtype == ROMType.kriging:
            #     num_sp = 25  # len(x_lo_rel) + 3 # number of features + s.d + mean + reg_param
            #############################################################

            # Hammersley sampling of num_sp points over the local sampling region
            region_sampling = sampling.HammersleySampling(x_bounds, number_of_samples=num_sp,
                                                          sampling_type="creation")
            values = region_sampling.sample_points()
            x_rel = np.array(x_rel)
            values = np.concatenate((x_rel.reshape(1, x_rel.shape[0]), values), axis=0)
            x_up_rel = np.array(x_up_rel)
            values = np.concatenate((x_up_rel.reshape(1, x_up_rel.shape[0]), values), axis=0)

            # b. generate output from actual function
            fcn = self.TRF.external_fcns[k]._fcn
            y_samples = []
            for j in range(0, values.shape[0]):
                y_samples.append(fcn._fcn(*values[j, :]))
            y_samples = np.array(y_samples)
            if y_samples.ndim == 1:
                y_samples = y_samples.reshape(len(y_samples), 1)

            # c. Generate a surrogate for each output and store in list_of_surrogates
            number_bb_outputs = y_samples.shape[1]
            for i in range(0, number_bb_outputs):
                surrogate_predictions = []
                training_samples = np.concatenate((values, y_samples[:, i].reshape(y_samples.shape[0], 1)), axis=1)

                # Generate pyomo equations: collect index of terms in indx, collect terms from xvars, then generate expression
                indx = self.exfn_xvars_ind[k]
                surr_vars = []
                for p in range(0, len(indx)):
                    surr_vars.append(self.TRF.xvars[indx[p]])

                if self.romtype == ROMType.linear0:
                    call_surrogate_method = polynomial_regression.PolynomialRegression(training_samples,
                                                                                       training_samples,
                                                                                       maximum_polynomial_order=1,
                                                                                       multinomials=0,
                                                                                       number_of_crossvalidations=3,
                                                                                       solution_method="mle",
                                                                                       training_split=tr_split)
                    p = call_surrogate_method.get_feature_vector()
                    call_surrogate_method.set_additional_terms([])
                    results = call_surrogate_method.poly_training()
                    surrogate_expressions[k].append(results.generate_expression(surr_vars))
                    surrogate_objects[k].append(lambda u, call_surrogate_method=call_surrogate_method,
                                                       results=results: call_surrogate_method.poly_predict_output(
                        results, u))
                    list_of_surrogates.append(results.optimal_weights_array.flatten().tolist())
                if self.romtype == ROMType.linear1:
                    call_surrogate_method = polynomial_regression.PolynomialRegression(training_samples,
                                                                                       training_samples,
                                                                                       maximum_polynomial_order=1,
                                                                                       multinomials=1,
                                                                                       number_of_crossvalidations=3,
                                                                                       solution_method="mle",
                                                                                       training_split=tr_split)
                    p = call_surrogate_method.get_feature_vector()
                    call_surrogate_method.set_additional_terms([])
                    results = call_surrogate_method.poly_training()
                    surrogate_expressions[k].append(results.generate_expression(surr_vars))
                    surrogate_objects[k].append(lambda u, call_surrogate_method=call_surrogate_method,
                                                       results=results: call_surrogate_method.poly_predict_output(
                        results, u))
                    list_of_surrogates.append(results.optimal_weights_array.flatten().tolist())
                elif self.romtype == ROMType.quadratic:
                    call_surrogate_method = polynomial_regression.PolynomialRegression(training_samples,
                                                                                       training_samples,
                                                                                       maximum_polynomial_order=2,
                                                                                       multinomials=1,
                                                                                       number_of_crossvalidations=3,
                                                                                       solution_method="mle",
                                                                                       training_split=tr_split)
                    p = call_surrogate_method.get_feature_vector()
                    call_surrogate_method.set_additional_terms([])
                    results = call_surrogate_method.poly_training()
                    surrogate_expressions[k].append(results.generate_expression(surr_vars))
                    surrogate_objects[k].append(lambda u, call_surrogate_method=call_surrogate_method,
                                                       results=results: call_surrogate_method.poly_predict_output(
                        results, u))
                    # surrogate_objects[k].append([call_surrogate_method, results])
                    if results.polynomial_order == 1:
                        no_comb_terms = int(
                            0.5 * len(self.exfn_xvars_ind[k]) * (len(self.exfn_xvars_ind[k]) - 1))
                        adjusted_vec_coeffs = np.zeros((1 + 2 * len(self.exfn_xvars_ind[k]) + no_comb_terms, 1))
                        adjusted_vec_coeffs[0, 0] = results.optimal_weights_array[0, 0]
                        adjusted_vec_coeffs[1:len(self.exfn_xvars_ind[k]) + 1, 0] = \
                            results.optimal_weights_array[1:len(self.exfn_xvars_ind[k]) + 1, 0]
                        adjusted_vec_coeffs[-no_comb_terms:, 0] = results.optimal_weights_array[-no_comb_terms:, 0]
                        list_of_surrogates.append(adjusted_vec_coeffs.flatten().tolist())
                    else:
                        list_of_surrogates.append(results.optimal_weights_array.flatten().tolist())
                elif self.romtype == ROMType.kriging:
                    call_surrogate_method = kriging.KrigingModel(training_samples, regularization=True,
                                                                 numerical_gradients=False)
                    p = call_surrogate_method.get_feature_vector()
                    results = call_surrogate_method.kriging_training()
                    surrogate_expressions[k].append(results.kriging_generate_expression(surr_vars))
                    surrogate_objects[k].append(lambda u, call_surrogate_method=call_surrogate_method,
                                                       results=results: call_surrogate_method.kriging_predict_output(
                        results, u))
                    list_of_surrogates.append(results.optimal_weights.flatten().tolist())

            # y_surrogates.append(surrogate_predictions)

            # Return in form of the original ROM function
            rom_params = list_of_surrogates
        # End text trap
        sys.stdout = sys.__stdout__

    elif (self.romtype == ROMType.interpolation):

        def interpolation_expression(coeffs, vars, vals):
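            # Builds a first-order model expr = coeffs[0] + sum_i coeffs[i] * (vars_i - vals_i); used below with
            # coeffs[0] = y1[k] and the finite-difference slopes as the remaining coefficients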
            expr = coeffs[0]
            for i in range(1, len(coeffs)):
                expr += coeffs[i] * (vars[i - 1] - vals[i - 1])
            return expr

        def interpolation_evaluation(eqn, vars, x_data):
            from pyomo.environ import Objective, ConcreteModel, value
            md = ConcreteModel()
            md.o2 = Objective(expr=eqn)
            y_eq = np.zeros((x_data.shape[0], 1))
            for j in range(0, x_data.shape[0]):
                for i in range(0, len(vars)):
                    vars[i].value = x_data[j, i]  # set the surrogate variables to this sample point
                y_eq[j, 0] = value(md.o2)
            return y_eq

        list_of_surrogates = []  # Will contain the surrogate parameters
        y_surrogates = []  # Will contain the output predictions from the surrogates when required
        surrogate_expressions = []  # Will contain the list of surrogate expressions
        surrogate_objects = []

        # For all external functions (verify!):
        for k in range(0, self.ly):
            surrogate_expressions.append([])
            surrogate_objects.append([])
            rom_params.append([])
            rom_params[k].append(y1[k])

            # Generate pyomo equations: collect index of terms in indx, collect terms from xvars, then generate expression
            indx = self.exfn_xvars_ind[k]
            surr_vars = []
            for p in range(0, len(indx)):
                surr_vars.append(self.TRF.xvars[indx[p]])

            # Check if it works with Ampl
            fcn = self.TRF.external_fcns[k]._fcn
            values = []
            for j in self.exfn_xvars_ind[k]:
                values.append(x[j])

            # Evaluate coefficients:  same as original implementation
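            # One-sided (forward) finite difference along each input j: g_j = (f(x + radius*e_j) - f(x)) / radius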
            for j in range(0, len(values)):
                radius = radius_base  # * scale[j]
                values[j] = values[j] + radius
                y2 = fcn._fcn(*values)
                rom_params[k].append((y2 - y1[k]) / radius)
                values[j] = values[j] - radius

            # Generate expression and surrogate object
            surrogate_expressions[k].append(interpolation_expression(rom_params[k], surr_vars, values))
            surrogate_objects[k].append(
                lambda u, interpolation_evaluation=interpolation_evaluation, surr_vars=surr_vars, values=values:
                interpolation_evaluation(interpolation_expression(rom_params[k], surr_vars, values), surr_vars, u))
            list_of_surrogates.append(rom_params[k])

    surrogate_objective, surrogate_constraints = surrogate_obj_constraints(self, self.ly, surrogate_expressions)
    return rom_params, surrogate_objects, surrogate_objective, surrogate_constraints, y1