Example #1
def create_training_points_regular_maxi4d(n_target, noise_level):
    """ create array of training points from
    regular turbine arrays
    Use maximin in 4d
    Returns
    -------
    X_train_real:    ndarray of shape(variable,6)
                    array containing valid training points
    y_train:         ndarray of shape(variable,)
                    value of CT* at test points
    n_train:        int
                    number of valid training points
    """
    regular_array = dp.maximin_reconstruction(n_target, 4)
    # rescale to design in range S_x = [2,20] S_y = [2,20],
    # S_off = [0, S_y] and theta = [0, pi]
    regular_array[:, 0] = 2 + 18 * regular_array[:, 0]
    regular_array[:, 1] = 2 + 18 * regular_array[:, 1]
    regular_array[:, 2] = regular_array[:, 1] * regular_array[:, 2]
    regular_array[:, 3] = np.pi * regular_array[:, 3]
    # convert regular-array parameters into the 3 most important turbines
    X_train_real = np.zeros((n_target, 6))
    for i in range(n_target):
        X_train_real[i, :] = find_important_turbines(regular_array[i, 0],
                                                     regular_array[i, 1],
                                                     regular_array[i, 2],
                                                     regular_array[i, 3])
    y_train = np.zeros(len(X_train_real))
    for i in range(len(X_train_real)):
        y_train[i] = simulator6d_halved(X_train_real[i, :], noise_level)
    n_train = n_target
    return X_train_real, y_train, n_train
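
The rescaling above is the standard affine map from the unit hypercube onto the parameter ranges, with the offset column additionally scaled row by row so that S_off stays in [0, S_y]. A minimal, self-contained sketch of just that design step (only numpy and diversipy are needed; the bounds follow the comments in the source):

import numpy as np
import diversipy.hycusampling as dp

design = dp.maximin_reconstruction(40, 4)  # space-filling points in [0, 1]^4
lo = np.array([2.0, 2.0, 0.0, 0.0])        # lower bounds: S_x, S_y, S_off, theta
hi = np.array([20.0, 20.0, 1.0, np.pi])    # upper bounds (S_off rescaled below)
design = lo + (hi - lo) * design           # affine map [0, 1]^4 -> [lo, hi]
design[:, 2] *= design[:, 1]               # S_off rescaled into [0, S_y] per row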
Example #2
def create_training_points_irregular_transformed(n_target, noise_level):
    """ create array of training points
    Discard any training points where turbines are
    not in the correct order and any training points where
    turbines are closer than 2D
    Maximin design in the transformed space

    Parameters
    ----------
    n_target: int
        target number of training points
    noise_level: float
        level of Gaussian noise to be added to
        the simulator output

    Returns
    -------
    X_train:        ndarray of shape (variable, 6)
                    array containing valid training points
    X_train_tran:   ndarray of shape (variable, 6)
                    array containing valid transformed training points
    y_train:        ndarray of shape (variable,)
                    value of CT* at the training points
    n_train:        int
                    number of valid training points
    """
    X_train_tran = dp.maximin_reconstruction(n_target, 6)
    X_train = np.zeros((len(X_train_tran), 6))
    X_train[:, 0] = expon(scale=10).ppf(X_train_tran[:, 0])
    X_train[:, 1] = norm(0, 2.5).ppf(X_train_tran[:, 1])
    X_train[:, 2] = expon(scale=10).ppf(X_train_tran[:, 2])
    X_train[:, 3] = norm(0, 2.5).ppf(X_train_tran[:, 3])
    X_train[:, 4] = expon(scale=10).ppf(X_train_tran[:, 4])
    X_train[:, 5] = norm(0, 2.5).ppf(X_train_tran[:, 5])
    # exclude training points where turbine 1 is closer than 2D
    X_train_dist = np.sqrt(X_train[:, 0]**2 + X_train[:, 1]**2)
    X_train_real = X_train[X_train_dist > 2]
    X_train_tran = X_train_tran[X_train_dist > 2]
    # exclude training points where turbine 2 is more important
    # than turbine 1, using distance = sqrt(10*x^2 + y^2)
    X_train_sig = calculate_distance(X_train_real[:, 2],
                                     X_train_real[:, 3]) \
        - calculate_distance(X_train_real[:, 0], X_train_real[:, 1])
    X_train_real = X_train_real[X_train_sig > 0]
    X_train_tran = X_train_tran[X_train_sig > 0]
    # exclude training points where turbine 3 is more important
    # than turbine 2, using distance = sqrt(10*x^2 + y^2)
    X_train_sig = calculate_distance(X_train_real[:, 4],
                                     X_train_real[:, 5]) \
        - calculate_distance(X_train_real[:, 2], X_train_real[:, 3])
    X_train_real = X_train_real[X_train_sig > 0]
    X_train_tran = X_train_tran[X_train_sig > 0]
    # run simulations to generate the training outputs
    y_train = np.zeros(len(X_train_real))
    for i in range(len(X_train_real)):
        y_train[i] = simulator6d_halved(X_train_real[i, :], noise_level)
    n_train = len(X_train_real)
    X_train = X_train_real
    return X_train, X_train_tran, y_train, n_train
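
The ppf calls above are inverse-CDF (quantile) transforms: the maximin design is space-filling in the unit hypercube, and pushing each column through the quantile function of an exponential or normal distribution yields a design that is space-filling with respect to those marginal distributions. A minimal 2D sketch of the idea (scipy and diversipy only; the scale and standard deviation mirror the values used above):

import diversipy.hycusampling as dp
from scipy.stats import expon, norm

U = dp.maximin_reconstruction(50, 2)  # space-filling design in [0, 1]^2
x = expon(scale=10).ppf(U[:, 0])      # streamwise distance, Exp(scale=10)
y = norm(0, 2.5).ppf(U[:, 1])         # spanwise offset, N(0, 2.5^2)
# a design point exactly on the boundary of [0, 1] would map to 0 or +/-inf,
# so boundary points should be excluded or clipped in practice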
Example #3
def uniform_lhs(nSamples, variables, **kwargs):
    '''Perform a uniform Latin hypercube sampling (LHS)

    Parameters
    ----------
    nSamples : int
        number of samples to draw
    variables : dict(str, tuple(lower, upper))
        variable dictionary; the key is the variable name and the value
        a tuple (or list) whose first entry is the lower bound and whose
        second entry is the upper bound
    **kwargs
        arguments passed on to diversipy.hycusampling.maximin_reconstruction

    Returns
    -------
    column_names : list(str)
        list with the column names for the LHS
    samples : np.ndarray
        numpy array with latin hypercube samples. Shape is nSamples x len(variables).

    Examples
    --------
        >>> from qd.numerics.sampling import uniform_lhs
        >>> 
        >>> variables = {'length':[0,10], 'angle':[-3,3]}
        >>> labels, data = uniform_lhs(nSamples=100, variables=variables)
        >>> labels
        ['angle', 'length']
        >>> data.shape
        (100, 2)
        >>> data.min(axis=0)
        array([-2.98394928,  0.00782609])
        >>> data.max(axis=0)
        array([ 2.8683843 ,  9.80865352])
    '''

    assert isinstance(nSamples, int)
    assert isinstance(variables, dict)
    assert all(isinstance(var_name, str) for var_name in variables.keys())
    assert all(
        isinstance(entry, (tuple, list, np.ndarray))
        for entry in variables.values())

    variable_labels = sorted(variables.keys())

    # extract variable limits
    vars_bounds = np.vstack([variables[label] for label in variable_labels])

    # LHS sampling in the unit hypercube
    data = maximin_reconstruction(nSamples, len(variable_labels), **kwargs)

    # adapt to variable limits: [0, 1] -> [min, max]
    vars_min = vars_bounds[:, 0]
    vars_max = vars_bounds[:, 1]
    data = (vars_max - vars_min) * data + vars_min

    return variable_labels, data
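
One pitfall worth noting: np.vstack expects a sequence of arrays, and recent NumPy releases raise a TypeError for a bare generator expression (older releases only issued a deprecation warning), hence the list comprehension when stacking the bounds. A quick self-contained check of that stacking step:

import numpy as np

bounds = {'angle': (-3, 3), 'length': (0, 10)}
labels = sorted(bounds)
vars_bounds = np.vstack([bounds[k] for k in labels])  # list, not a generator
print(vars_bounds[:, 0])  # lower bounds: [-3  0]
print(vars_bounds[:, 1])  # upper bounds: [ 3 10]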
Example #4
"""Maximin sampling of regular arrays and then plot
(x1,y1,x2,y2,x3,y3) for 3 most important turbines"""
import sys
import matplotlib.pyplot as plt
import diversipy.hycusampling as dp
import numpy as np
sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')
from regular_array_sampling.functions import find_important_turbines

num_points = 40
regular_array = dp.maximin_reconstruction(num_points, 4)
# rescale to design in range S_x = [2,20] S_y = [2,20],
# S_off = [0, S_y] and theta = [0, pi]
regular_array[:, 0] = 2 + 18 * regular_array[:, 0]
regular_array[:, 1] = 2 + 18 * regular_array[:, 1]
regular_array[:, 2] = regular_array[:, 1] * regular_array[:, 2]
regular_array[:, 3] = np.pi * regular_array[:, 3]

fig = plt.figure(figsize=(12.0, 5.0))
turbine1 = fig.add_subplot(1, 3, 1)
turbine1.set_xlabel('x_1 (D m)')
turbine1.set_ylabel('y_1 (D m)')
turbine2 = fig.add_subplot(1, 3, 2)
turbine2.set_xlabel('x_2 (D m)')
turbine2.set_ylabel('y_2 (D m)')
turbine3 = fig.add_subplot(1, 3, 3)
turbine3.set_xlabel('x_3 (D m)')
turbine3.set_ylabel('y_3 (D m)')

turbine_coords = np.zeros((num_points, 6))
for i in range(num_points):
    turbine_coords[i, :] = find_important_turbines(regular_array[i, 0],
                                                   regular_array[i, 1],
                                                   regular_array[i, 2],
                                                   regular_array[i, 3])

# scatter the (x, y) position of each of the 3 most important turbines
# (loop body and plot calls reconstructed from the truncated source,
# following the same pattern as Example #1)
turbine1.scatter(turbine_coords[:, 0], turbine_coords[:, 1])
turbine2.scatter(turbine_coords[:, 2], turbine_coords[:, 3])
turbine3.scatter(turbine_coords[:, 4], turbine_coords[:, 5])
plt.show()