Code example #1
File: latin_design.py  Project: SheffieldML/GPyOpt
    def get_samples(self, init_points_count, criterion='center'):
        """
        Generates the required number of sample points
        
        :param init_points_count: Number of samples to generate
        :param criterion: For details of the effect of this parameter, please refer to pyDOE.lhs documentation
                          Default: 'center'
        :returns: Generated samples
        """
        samples = np.empty((init_points_count, self.space.dimensionality))

        # Use random design to fill non-continuous variables
        random_design = RandomDesign(self.space)
        random_design.fill_noncontinous_variables(samples)

        if self.space.has_continuous():
            bounds = self.space.get_continuous_bounds()
            lower_bound = np.asarray(bounds)[:,0].reshape(1, len(bounds))
            upper_bound = np.asarray(bounds)[:,1].reshape(1, len(bounds))
            diff = upper_bound - lower_bound

            from pyDOE import lhs
            X_design_aux = lhs(len(self.space.get_continuous_bounds()), init_points_count, criterion=criterion)
            I = np.ones((X_design_aux.shape[0], 1))
            X_design = np.dot(I, lower_bound) + X_design_aux * np.dot(I, diff)

            samples[:, self.space.get_continuous_dims()] = X_design

        return samples
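The scaling step above is the usual way to map a pyDOE unit-cube design onto box bounds. A minimal standalone sketch of the same idea (the bounds and sample count below are illustrative, not taken from GPyOpt):

import numpy as np
from pyDOE import lhs

bounds = [(0.0, 1.0), (-5.0, 5.0)]            # example box constraints
lower = np.asarray(bounds)[:, 0]
upper = np.asarray(bounds)[:, 1]

# unit-cube Latin hypercube design, then an affine map into the box
unit_design = lhs(len(bounds), samples=10, criterion='center')
samples = lower + unit_design * (upper - lower)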
Code example #2
 def generateCentroids(self):
     if self.dimensions<self.centre_num:
         centres = lhs(self.dimensions, samples=self.centre_num, criterion=self.criterion)
     else:
         centres = lhs(self.dimensions, criterion=self.criterion)
         centres = centres[:self.centre_num]
     for i, centre in enumerate(centres):
         centres[i] = centre * self.hyper_cube_length  # scale unit-cube point to the hypercube edge length
     std = sqrt(self.variance)
     weight = 1
     self.weights = [1 for i in range(self.centre_num)]
     self.centroids = [Centroid(centre, i, std, weight) for i, centre in enumerate(centres)]
Code example #3
File: rbfopt_utils.py  Project: NoobSajbot/rbfopt
def get_lhd_corr_points(var_lower, var_upper):
    """Compute a latin hypercube design with min correlation.

    Compute a list of (n+1) points in the given box, where n is the
    dimension of the space. The selected points are picked according
    to a random latin hypercube design with minimum correlation
    criterion. This function relies on the library pyDOE.

    Parameters
    ----------
    var_lower : List[float]
        List of lower bounds of the variables.
    var_upper : List[float]
        List of upper bounds of the variables.

    Returns
    -------
    List[List[float]]
        List of points in the latin hypercube design.
    """
    assert(len(var_lower)==len(var_upper))

    n = len(var_lower)
    if (n == 1):
        # For unidimensional problems, simply take the two endpoints
        # of the interval as starting points
        return [var_lower, var_upper]
    # Otherwise, generate the LHD
    lhd = pyDOE.lhs(n, n+1, 'correlation')
    node_pos = [[var_lower[i] + (var_upper[i] - var_lower[i])*lhd_point[i] 
                 for i in range(n)] for lhd_point in lhd]
    return node_pos
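A hedged usage sketch of the function above, assuming the module has imported pyDOE and that get_lhd_corr_points is in scope; the bounds are invented for illustration:

var_lower = [0.0, -1.0, 2.0]
var_upper = [1.0,  1.0, 5.0]
points = get_lhd_corr_points(var_lower, var_upper)
# n = 3 dimensions, so the result is a list of n + 1 = 4 points inside the box
print(len(points), len(points[0]))   # 4 3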
Code example #4
File: utils.py  Project: Basvanstein/OWCK
def get_design_sites(dim, n_sample, x_lb, x_ub, sampling_method="lhs"):

    x_lb = atleast_2d(x_lb)
    x_ub = atleast_2d(x_ub)

    x_lb = x_lb.T if size(x_lb, 0) != 1 else x_lb
    x_ub = x_ub.T if size(x_ub, 0) != 1 else x_ub

    if sampling_method == "lhs":
        # Latin Hyper Cube Sampling: Get evenly distributed sampling in R^dim
        samples = lhs(dim, samples=n_sample) * (x_ub - x_lb) + x_lb

    elif sampling_method == "uniform":
        samples = np.random.rand(n_sample, dim) * (x_ub - x_lb) + x_lb

    elif sampling_method == "sobol":
        seed = mod(int(time.time()) + os.getpid(), int(1e6))
        samples = np.zeros((n_sample, dim))
        for i in range(n_sample):
            samples[i, :], seed = i4_sobol(dim, seed)
        samples = samples * (x_ub - x_lb) + x_lb

    elif sampling_method == "halton":
        sequencer = Halton(dim)
        samples = sequencer.get(n_sample) * (x_ub - x_lb) + x_lb

    return samples
Code example #5
def sampleHypercube(n_dim, n_samp, rand_set_id=0, crit='m', iterations=5,
                    rdata_dir='~/cowen/data/random'):
    """Load (if file exists) or generate samples from within hypercube using
    Latin hypercube sampling

    Requires pyDOE to generate new samples.
    """
    fname = samplesFilename(n_dim=n_dim,
                            n_samp=n_samp,
                            rand_set_id=rand_set_id,
                            crit=crit,
                            iterations=iterations)
    rdata_dir = os.path.expandvars(os.path.expanduser(rdata_dir))
    fpath = os.path.join(rdata_dir, fname)

    if os.path.exists(fpath):
        samps = fileio.from_file(fpath)
    else:
        logging.info('File not found. Generating new set of samples & saving'
                     ' result to "%s"', fpath)
        import pyDOE
        mkdir(rdata_dir)
        # Set a deterministic random state based upon the critical hypercube
        # sampling parameters specified
        n_bad_seeds(n_dim, n_samp, rand_set_id)
        samps = pyDOE.lhs(n=n_dim, samples=n_samp, criterion=crit,
                          iterations=iterations)
        fileio.to_file(samps, fpath)
    return samps
Code example #6
    def sample(self): # sample with max expected improvement
        self.opt_ini_guess = lhs(self.p, 10) # samples for internal max expectation
        self.opt_ini_guess = self.opt_ini_guess*(self.bounds[:, 1]-self.bounds[:, 0])+self.bounds[:, 0]
        p = self.opt_ini_guess.shape[1] # problem size
        n = self.opt_ini_guess.shape[0] # number of optimization runs
        # test_lambdas = np.array([int(self.p)*[0.01],
        #                          int(self.p)*[0.1],
        #                          int(self.p)*[1.0],
        #                          int(self.p)*[10.0]])
        # self.concentrated_likelihood(test_lambdas)  # only for CL-opt eq 4 ego (jones '98)
        self.SI = np.diag(np.ones(self.bounds.shape[0])*
                          self.irl_list[self.X.shape[0]-self.num_ini_guess])  # only for IRL ego

        self.fit()
        func = lambda x: - self.f(x)
        result_x = np.zeros((n, p))
        result_f = np.zeros((n, 1))
        for i, x0 in enumerate(self.opt_ini_guess):
            res = opt.minimize(func, x0=x0, bounds=self.bounds, method='slsqp', tol=1e-5,
                                   options={'eps': 1e-8, 'iprint': 2, 'disp': False, 'maxiter': 100})
            result_f[i] = res.fun
            result_x[i] = res.x
            if np.any(np.isnan(res.x)==True):
                wait = 1

        if np.abs(np.min(result_f, axis=0)) < 1e-3:
            return np.zeros((1,1)) # terminate if no improvement

        return result_x[np.argmin(result_f, axis=0)]
Code example #7
    def generate_points(self):
        """Generate a matrix with the initial sample points,
        scaled to the unit cube

        :return: Latin hypercube design in the unit cube
        """
        return pydoe.lhs(self.dim, self.npts, self.criterion)
Code example #8
File: util.py  Project: neeljp/pod_deim
def generate_prameter_samples(n,p):
        ''' Generates Latin hypercube samples with pyDOE and
        transforms them to the parameter bounds.

        Parameters
        ----------
        n: int
            number of samples
        p: int, either 5 or 7
            number of parameters
        Returns
        -------
        lh: 2D array
            of parameter samples
        '''
        if p==7:
            b=np.array([0.75,200,0.95,1.5,50,0.05,1.5])
            a=np.array([0.25,1.5,0.05,0.25,10,0.01,0.7])
        elif p==5 :
            b=np.array([0.05,200,1.5,50,1.5])
            a=np.array([0.01,1.5,0.25,10,0.7])
        else:
            return 1
        lh = pyDOE.lhs(p, samples=n)
        #upper and lower bounds of the parameters in MIT-gcm-PO4-DOP
 
        lh = lh*(b-a)+a
        return lh
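A small usage sketch of the function above (assuming numpy and pyDOE are imported at module level, as the code requires); the sample count is arbitrary:

lh = generate_prameter_samples(n=20, p=7)
# 20 samples of the 7 MIT-gcm-PO4-DOP parameters, each column within its [a, b] range
print(lh.shape)          # (20, 7)
print(lh.min(axis=0))    # column-wise minima stay at or above the lower bounds a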
Code example #9
    def create_macros_LHS(self,
                          number_of_macros=None,
                          criterion=None,
                          iterations=None,
                          batch_size = None,
                          append_default=False):
        try:
            import pyDOE
        except ImportError:
            raise ImportError(
                'The pyDOE package must be installed to use this class')

        if batch_size is not None:
            return _batch(self.create_macros_LHS(number_of_macros,
                                                 criterion,
                                                 iterations), n = batch_size)

        if number_of_macros is None:
            number_of_macros = self.number_of_macros

        if self.seed is not None:
            np.random.seed(self.seed)

        factors = sum([e.n_factors for e in self
                       if isinstance(e, SetValue_random)])
        lhs_matrix = pyDOE.lhs(factors, number_of_macros,
                               criterion=criterion,
                               iterations=iterations)

        macro_list = []

        for macro_idx in range(number_of_macros):
            macro = []
            lhs_idx = 0
            for elem in self:
                if isinstance(elem, SetValue_random):
                    lhs_val = lhs_matrix[macro_idx, lhs_idx:lhs_idx +
                                         elem.n_factors]
                    mcr = elem.get_macro(macro_idx,
                                         lower_tail_probability=lhs_val)
                    lhs_idx += elem.n_factors
                else:
                    mcr = elem.get_macro(macro_idx)

                if self.counter_token:
                    mcr = mcr.replace(self.counter_token, str(macro_idx))
                if len(mcr) > 0:
                    macro.extend(mcr.split('\n'))
            macro_list.append(macro)
        if append_default:
            macro = []
            for elem in self:
                mcr = elem.get_macro(number_of_macros)
                if self.counter_token:
                   mcr = mcr.replace(self.counter_token, str(number_of_macros))
                if len(mcr) > 0:
                    macro.extend(mcr.split('\n'))
            macro_list.append(macro)
        return macro_list
Code example #10
def LHS_TITAN(num_sample,min1,range1,min2,range2):  
   
    #number of random dimension
    randoms=2
    
    lhd = pylhs.lhs(randoms, num_sample)
    lhs=np.zeros((num_sample,randoms))
    lhs[:,0]=lhd[:,0]*range1+min1
    lhs[:,1]=lhd[:,1]*range2+min2
    
    fp = open(os.path.join(os.getcwd(),'titan/input_Coulomb.py'),'r')
    simdata = fp.read()
    fp.close()
    
    fpp = open(os.path.join(os.getcwd(),'titan/exec_script'),'r')
    simdatap = fpp.read()
    fpp.close()
    
    
    internal=1111
    bed=2222    
    
    jubnumber=55555
    
    dircs_name='LHS'
    
    #delete the folders if they already exist
    for i in range(0,num_sample): 
        dirname=os.path.join(os.getcwd(),dircs_name+str(i))
        if (os.path.isdir(dirname)):
            st.rmtree(os.path.join(os.getcwd(),dircs_name+str(i))) 
    
    #creates the folder for simulation
    for i in range(0,num_sample): 
        st.copytree('titan',os.path.join(os.getcwd(),dircs_name+str(i))) 
    
    #replace the sample value with initial value in the list of input    
    for i in range(0,num_sample):        
        print(lhs[i,:])
        rep_internal="{:.2f}".format(lhs[i,0])
        rp_simdata = simdata.replace(str(internal),rep_internal)
        rep_bed="{:.2f}".format(lhs[i,1])
        rp_simdata = rp_simdata.replace(str(bed),rep_bed)
        fp = open(os.path.join(os.getcwd(),dircs_name+str(i)+'/input_Coulomb.py'),'w')
        fp.write(rp_simdata)
        fp.flush()
        fp.close()
#        print i
    #modifying the job name in the slurm scripts for each sample    
    for i in range(0,num_sample):        
        rep_jubnumber="{:.0f}".format(i)
        rp_simdatap = simdatap.replace(str(jubnumber),rep_jubnumber)   
        fpp = open(os.path.join(os.getcwd(),dircs_name+str(i)+'/exec_script'),'w')
        fpp.write(rp_simdatap)
        fpp.flush()
        fpp.close()

    #writing the LHS design points to a .csv     
    np.savetxt('Design_Points.csv', lhs, delimiter=',')
Code example #11
File: generate_macros.py  Project: Fheinen/AnyPyTools
    def generate_macros(self, batch_size = None):
        try:
            import pyDOE
        except ImportError:
            raise ImportError('The pyDOE package must be installed to use this class')

        self.lhd = pyDOE.lhs(self.LHS_factors, samples=self.number_of_macros)
        return super(LatinHyperCubeMacroGenerator,self).generate_macros(batch_size)
Code example #12
File: test_doe.py  Project: maximtyan/maxim-codes
def run_test_1():
    x = lhs(2,2)
    print(x)

    plt.figure(1)
    plt.plot(x[:,0],x[:,1],'ro')
    #plt.axis([-.5,1.5,-.5,1.5])
    plt.grid(True)
    plt.show()
Code example #13
def _centered_latin_hypercube(center, schlinge, lb, ub, samples):
    samples = max(samples, len(center))
    design = lhs(len(lb), samples)
    diameter = (ub - lb) / 2
    _lb = center - diameter * schlinge
    _ub = center + diameter * schlinge
    lb = np.maximum(lb, _lb)
    ub = np.minimum(ub, _ub)
    return design * (ub - lb) + lb
Code example #14
File: tmp_doe.py  Project: maximtyan/maxim-codes
def save_samples():
    n = 100
    dim = 2
    path = 'LHC_test_samples_2d.txt'
    xDoe = lhs(dim,n)
    fid = open(path,'wt')
    for xline in xDoe:
        for x in xline:
            fid.write('%.6f\t'%x)
        fid.write('\n')
    fid.close()
Code example #15
File: theta.py  Project: CoAxLab/radd
def latin_hypercube(pkeys, kind='dpm', nrvs=25, tb=.65, force_normal=False):
    """ sample random parameter sets to explore global minima (called by
    Optimizer method __hop_around__())
    """
    nparams = len(pkeys)
    design = lhs(nparams, samples=nrvs, criterion='center')
    bounds = get_bounds(kind=kind)
    # reverse V_brake boundaries to get max negative val
    bounds['ssv'] = (bounds['ssv'][1], bounds['ssv'][0])
    pmax = np.array([bounds[pk][-1] for pk in pkeys])
    design = pmax * design
    samplesLH = {p: design[:, i] for i, p in enumerate(pkeys)}
    return samplesLH
Code example #16
File: latin_design.py  Project: JRetza/emukit
    def get_samples(self, point_count):
        bounds = self.parameter_space.get_bounds()
        X_design_aux = pyDOE.lhs(len(bounds), point_count, criterion='center')
        ones = np.ones((X_design_aux.shape[0], 1))

        lower_bound = np.asarray(bounds)[:, 0].reshape(1, len(bounds))
        upper_bound = np.asarray(bounds)[:, 1].reshape(1, len(bounds))
        diff = upper_bound - lower_bound

        X_design = np.dot(ones, lower_bound) + X_design_aux * np.dot(ones, diff)

        samples = self.parameter_space.round(X_design)

        return samples
Code example #17
File: sampleNonpara.py  Project: yunyangye/yunyangye
def sampleNonpara(num,cz):
    #read the variable table, "variable"
    data_set_temp = np.genfromtxt('./variable.csv',
                                  skip_header=1,
                                  dtype=str,
                                  delimiter=',')

    #generate the data set under cz
    climate = ['1A','2A','2B','3A','3B','3C','4A','4B','4C','5A','5B','6A','6B','7A','8A']
    ind = climate.index(cz)
    data_set = []
    k = 1
    for row in data_set_temp:
        temp = [str(k)]
        temp.append(row[0])#the measure's name
        temp.append(row[1])#the argument's name
        temp.append(float(row[ind+2]))#the minimum value
        temp.append(float(row[ind+19]))#the maximum value
        data_set.append(temp)
        k += 1

    names = []
    bounds = []
    for row in data_set:
        names.append(row[0])
        temp = []
        temp.append(row[3])
        temp.append(row[4])
        bounds.append(temp)
    
    #set the variables and ranges of variables
    problem = {
        'num_vars': len(data_set),
        'names': names,
        'bounds': bounds
    }

    #select the samples
    sample_temp = doe.lhs(len(data_set), samples=num)
    
    param_values = []
    for row1 in sample_temp:
        temp = []
        for ind,row in enumerate(bounds):
            temp.append((row[1]-row[0])*row1[ind]+row[0])
        param_values.append(temp)
    
    return data_set,problem,param_values
Code example #18
    def generate_macros(self, batch_size = None):
        try:
            import pyDOE
        except ImportError:
            raise ImportError('The pyDOE package must be installed to use this class')

        # Only generate LHS values if the user requested more than one macro,
        # since the first macro is always the mean value
        if self.number_of_macros > 1:
            # Create the Latin hyper cube sample matrix. This is used by the
            # individual macro generator functions when macros are created.
            self.lhd = pyDOE.lhs(self.LHS_factors,  samples=self.number_of_macros-1,
                                 criterion = self.criterion,
                                 iterations = self.iterations)

        return super(LatinHyperCubeMacroGenerator,self).generate_macros(batch_size)
Code example #19
File: test_basicSampling.py  Project: npandachg/BET
def verify_random_samples(model, sampler, sample_type, param_min, param_max,
        num_samples, savefile, parallel):
    # recreate the samples
    if num_samples is None:
        num_samples = sampler.num_samples
    param_left = np.repeat([param_min], num_samples, 0)
    param_right = np.repeat([param_max], num_samples, 0)
    samples = (param_right-param_left)
    if sample_type == "lhs":
        samples = samples * pyDOE.lhs(param_min.shape[-1], num_samples)
    elif sample_type == "random" or "r":
        np.random.seed(1)
        samples = samples * np.random.random(param_left.shape)
    samples = samples + param_left
    # evaluate the model at the samples directly
    data = model(samples)

    # evaluate the model at the samples
    # reset the random seed
    if sample_type == "random" or "r":
        np.random.seed(1)
    (my_samples, my_data) = sampler.user_samples(samples, savefile,
            parallel)

    # make sure that the samples are within the boundaries
    assert np.all(my_samples <= param_right)
    assert np.all(my_samples >= param_left)

    if len(data.shape) == 1:
        data = np.expand_dims(data, axis=1)
    if len(samples.shape) == 1:
        samples = np.expand_dims(samples, axis=1)
    
    # compare the samples
    nptest.assert_array_equal(samples, my_samples)
    # compare the data
    nptest.assert_array_equal(data, my_data)
    # did num_samples get updated?
    assert samples.shape[0] == sampler.num_samples
    assert num_samples == sampler.num_samples
    # did the file get correctly saved?
    
    if comm.rank == 0:
        mdat = sio.loadmat(savefile)
        nptest.assert_array_equal(samples, mdat['samples'])
        nptest.assert_array_equal(data, mdat['data'])
    comm.Barrier()
Code example #20
File: DOE.py  Project: leal26/optimization_tools
    def define_points(self, runs=None):
        """
        Method to define the points to be evaluated based on the results from
        distribution given by the array method and the bound defined by the
        add_variable method.

        For dummy, levels means nothing"""
        self.n_var = 0
        self.n_var_2 = 0

        for variable in self.variables:
            if variable['levels'] == self.levels:
                self.n_var += 1
            elif variable['levels'] == 2:
                self.n_var_2 += 1
            else:
                raise Exception('A variable has a number of levels that is ' +
                                'not the default or 2')
        if self.driver == 'Taguchi':
            self.Taguchi()
        elif self.driver == 'Full Factorial':
            self.FullFactorial()
        elif self.driver == 'Random':
            self.runs = runs
            self.Random(runs)
        elif self.driver == 'Latin Hypercube':
            self.runs = runs
            self.array = lhs(len(self.variables), samples=runs,
                             criterion='center')

        self.domain = {}

        for j in range(self.n_var+self.n_var_2):
            upper = self.variables[j]['upper']
            lower = self.variables[j]['lower']
            levels = self.variables[j]['levels']
            type = self.variables[j]['type']

            dummy = []
            for i in range(self.runs):
                scale = self.array[i][j]
                if type == int and (scale*(upper-lower) % (levels-1.) != 0):
                    raise Exception('The bounds of the defined integer are ' +
                                    'not compatible with number of levels.')
                else:
                    dummy.append(lower + scale*(upper-lower) / (levels-1.))
            self.domain[self.variables[j]['name']] = dummy
Code example #21
File: basicSampling.py  Project: npandachg/BET
    def random_samples(self, sample_type, param_min, param_max,
            savefile, num_samples=None, criterion='center', parallel=False):
        """
        Sampling algorithm with three basic options

            * ``random`` (or ``r``) generates ``num_samples`` samples in
                ``lam_domain`` assuming a Lebesgue measure.
            * ``lhs`` generates a latin hyper cube of samples.

        Note: This function is designed only for generalized rectangles and
        assumes a Lebesgue measure on the parameter space.
       
        :param string sample_type: type sampling random (or r),
            latin hypercube(lhs), regular grid (rg), or space-filling
            curve(TBD) 
        :param param_min: minimum value for each parameter dimension
        :type param_min: :class:`numpy.ndarray` (ndim,)
        :param param_max: maximum value for each parameter dimension
        :type param_max: :class:`numpy.ndarray` (ndim,)
        :param string savefile: filename to save samples and data
        :param int num_samples: N, number of samples (optional)
        :param string criterion: latin hypercube criterion see 
            `PyDOE <http://pythonhosted.org/pyDOE/randomized.html>`_
        :param bool parallel: Flag for parallel implementation. Uses
            lowercase ``mpi4py`` methods if ``samples.shape[0]`` is not
            divisible by ``size``. Default value is ``False``. 
        :rtype: tuple
        :returns: (``parameter_samples``, ``data_samples``) where
            ``parameter_samples`` is np.ndarray of shape (num_samples, ndim)
            and ``data_samples`` is np.ndarray of shape (num_samples, mdim)

        """
        # Create N samples
        if num_samples is None:
            num_samples = self.num_samples
        param_left = np.repeat([param_min], num_samples, 0)
        param_right = np.repeat([param_max], num_samples, 0)
        samples = (param_right-param_left)
         
        if sample_type == "lhs":
            samples = samples * lhs(param_min.shape[-1],
                    num_samples, criterion)
        elif sample_type == "random" or "r":
            samples = samples * np.random.random(param_left.shape) 
        samples = samples + param_left
        return self.user_samples(samples, savefile, parallel)
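The heart of the sampler above is a shift-and-scale of a unit design. A minimal sketch of just that step outside the class, with illustrative parameter bounds (not part of BET):

import numpy as np
from pyDOE import lhs

param_min = np.array([0.0, 10.0])
param_max = np.array([1.0, 20.0])
num_samples = 8

def unit_design(sample_type):
    # 'lhs' gives a Latin hypercube; anything else falls back to i.i.d. uniform draws
    if sample_type == "lhs":
        return lhs(param_min.shape[-1], num_samples, 'center')
    return np.random.random((num_samples, param_min.shape[-1]))

samples = param_min + unit_design("lhs") * (param_max - param_min)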
Code example #22
File: test_basicSampling.py  Project: leiyangcq/BET
def verify_random_sample_set(sampler, sample_type, input_sample_set,
                                     num_samples):
    test_sample_set = input_sample_set
    np.random.seed(1)
    # recreate the samples
    if num_samples is None:
        num_samples = sampler.num_samples

    input_domain = input_sample_set.get_domain()
    if input_domain is None:
        input_domain = np.repeat([[0, 1]], input_sample_set.get_dim(), axis=0)

    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)

    input_values = (input_right - input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                                                num_samples, 'center')
    elif sample_type == "random" or "r":
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    test_sample_set.set_values(input_values)

    # reset the random seed
    np.random.seed(1)

    # create the sample set from the domain
    print(sample_type)
    my_sample_set = sampler.random_sample_set(sample_type, input_sample_set,
                                                  num_samples=num_samples)

    # make sure that the samples are within the boundaries
    assert np.all(my_sample_set._values <= input_right)
    assert np.all(my_sample_set._values >= input_left)

    # compare the samples
    if comm.size == 0:
        nptest.assert_array_equal(test_sample_set._values,
                              my_sample_set._values)
Code example #23
File: stats.py  Project: Commonlibs/GPyOpt
def initial_design(design,bounds,data_init):
	"""
	:param design: the choice of designs
	:param bounds: the boundary of initial points
	:param data_init: the number of initial points
	"""
	if design == 'random':
		X_design = samples_multidimensional_uniform(bounds, data_init)
	elif design == 'latin':
		try:
			from pyDOE import lhs
			import numpy as np
			# Generate points in the unit hypercube
			X_design_aux = lhs(len(bounds),data_init, criterion='center')
			# Normalize to the given box constraints
			lB = np.asarray(bounds)[:,0].reshape(1,len(bounds))
			uB = np.asarray(bounds)[:,1].reshape(1,len(bounds))
			diff = uB-lB
			I = np.ones((X_design_aux.shape[0],1))
			X_design = np.dot(I,lB) + X_design_aux*np.dot(I,diff)
		except:
			print("Cannot find pyDOE library, please install it to use a Latin hypercube to initialize the model.")
	return X_design
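A hedged usage sketch of initial_design above; the bounds are arbitrary, and the 'random' branch would additionally need samples_multidimensional_uniform from the same module:

bounds = [(-1.0, 1.0), (0.0, 5.0)]
X0 = initial_design('latin', bounds, data_init=4)
# X0 is a (4, 2) array of centred Latin hypercube points lying inside the bounds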
Code example #24
File: sampleMeta.py  Project: yunyangye/yunyangye
def sampleMeta(num,cz):
    #read the variable table, "variable"
    data_set_temp = np.genfromtxt('./variable.csv',
                                  skip_header=1,
                                  dtype=str,
                                  delimiter=',')

    #generate the data set under cz
    climate = ['1A','2A','2B','3A','3B','3C','4A','4B','4C','5A','5B','6A','6B','7A','8A']
    ind = climate.index(cz)
    data_set = []
    k = 1
    num_sens = 0
    for row in data_set_temp:
        temp = [str(k)]
        temp.append(row[0])#the measure's name
        temp.append(row[1])#the argument's name
        temp.append(float(row[ind+2]))#the minimum value
        temp.append(float(row[ind+19]))#the maximum value
        data_set.append(temp)
        num_sens += 1
        k += 1

    #select the samples
    sample_temp = doe.lhs(num_sens, samples=num*num_sens)
    
    param_values = []
    for row1 in sample_temp:
        temp = []
        num_doe = 0
        for row in data_set:
            temp.append((row[4]-row[3])*row1[num_doe]+row[3])
            num_doe += 1
                
        param_values.append(temp)
    
    return data_set,param_values
Code example #25
File: det_fit.py  Project: johnbachman/tBidBaxLipo
def generate_latin_hypercube(gf, num_samples, basename):
    # The number of parameters
    ndim = (len(gf.builder.global_params) +
            (len(gf.data) * len(gf.builder.local_params)))
    # Generate a latin hypercube of the parameters
    lh = lhs(ndim, samples=num_samples, criterion='center')
    # For each sample...
    for samp_ix in range(num_samples):
        # ...initialize the vector of initial values
        p0 = np.zeros(ndim)
        # For each parameter...
        for p_ix in range(ndim):
            # ...get the prior...
            pr = gf.priors[p_ix]
            # ...and use the inverse CDF of the prior distribution to convert
            # the percentile value on [0, 1] to an initial value
            percentile = lh[samp_ix, p_ix]
            p0[p_ix] = pr.inverse_cdf(percentile)
        print("Saving position:")
        print(p0)
        filename = '%s.%dof%d.lhs' % (basename, samp_ix+1, num_samples)
        with open(filename, 'w') as f:
            cPickle.dump(p0, f)
    return
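The percentile-to-value step above is an inverse-CDF (PPF) transform of each unit-cube coordinate. A self-contained sketch of the same idea using scipy.stats priors; the priors here are stand-ins, not the ones used by the fitting code:

import numpy as np
from pyDOE import lhs
from scipy.stats import norm, uniform

# one prior per parameter (illustrative choices)
priors = [norm(loc=0.0, scale=1.0), uniform(loc=-2.0, scale=4.0)]

lh = lhs(len(priors), samples=5, criterion='center')    # percentiles in [0, 1]
starts = np.column_stack([prior.ppf(lh[:, i])           # inverse CDF per column
                          for i, prior in enumerate(priors)])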
Code example #26
def design_lhs_exp(variables, maps, offsets=None, samples=int(1e4), project_linear=True):
    """ Design an LHS experiment """

    design = lhs(len(variables), samples=samples, criterion="m", iterations=100)
    z_design = np.zeros_like(design)

    print "Computing LHS design..."
    if project_linear:
        print "   using linear re-projection for log variables"
    else:
        print "   using original variable coordinate"
    for i, v in enumerate(variables):
        dist, a, b = v[3]

        if project_linear:  # Re-sample in linear space
            if v[0].startswith("ln"):
                ## 9/4/2014
                ## This is an experimental correction to re-project the
                ## logarithmic variables into their normal coordinate
                ## system. It should only effect the sampling, and hopefully
                ## improve it by forcing it to even things out over the
                ## actually range we care about
                a = np.exp(a)
                b = np.exp(b)
                offsets[i] = np.exp(offsets[i])

            elif v[0].startswith("log"):
                ## 10/26/2014
                ## In accordance with above, but for log10 vars
                a = 10.0 ** a
                b = 10.0 ** b
                offsets[i] = 10.0 ** offsets[i]

        if offsets:
            ## These corrections with "offsets" re-center the interval
            ## so that the left endpoint is 0. I found that if arbitrary
            ## lower/upper limits were used, sometimes the PPF routines
            ## would really mess up in inverting the CDF.
            a, b = a - offsets[i], b - offsets[i]
        if dist == "uniform":
            design[:, i] = uniform(a, b).ppf(design[:, i])
        elif dist == "normal":
            design[:, i] = norm(a, b).ppf(design[:, i])
        elif dist == "loguniform":
            design[:, i] = loguni_ppf(design[:, i], a, b)
        else:
            raise ValueError("no dist defined for %s" % dist)

        if offsets:
            ## Project back in to the correct limits
            design[:, i] += offsets[i]
            a, b = a + offsets[i], b + offsets[i]

        if project_linear:
            if v[0].startswith("ln"):
                ## 9/4/2014
                ## Second half of correction
                a = np.log(a)
                b = np.log(b)
                design[:, i] = np.log(design[:, i])
            elif v[0].startswith("log"):
                ## 10/26/2014
                a = np.log10(a)
                b = np.log10(b)
                design[:, i] = np.log10(design[:, i])

        z_design[:, i] = maps[i](design[:, i], a, b)
    design = design.T  # in x-coords
    z_design = z_design.T

    return design, z_design
Code example #27
    lb = 0.0  #left boundary
    rb = 1.0  #right boundary

    N_f = 1000  #number of interior points
    layers = [1, 400, 1]

    #constant in the exact solution and RHS
    k = 4

    ###########################
    #Dirichlet data
    u_left = 0
    u_right = 0

    # generate the collocation points
    X_f = lb + (rb - lb) * lhs(1, N_f)

    model = PhysicsInformedNN(lb, u_left, rb, u_right, X_f, layers, k)

    start_time = time.time()
    model.train(1000)
    elapsed = time.time() - start_time
    print('Training time: %.4f' % (elapsed))

    # test data
    nPred = 1001
    X_star = np.linspace(lb, rb, nPred)[np.newaxis]
    X_star = X_star.T

    u_pred, f_u_pred = model.predict(X_star)
Code example #28
File: cubic_heat.py  Project: borjanG/deep.learning
 noise = 0.00
 u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])
 
 # For solution
 N0 = Exact_sol.shape[0]
 N_b = Exact_sol.shape[1]
 N_f = 2000
     
 idx_x = np.random.choice(x_sol.shape[0], N0, replace=False)
 x0_train = x_sol[idx_x,:]
 u0_train = Exact_sol[idx_x,0:1]
 
 idx_t = np.random.choice(t_sol.shape[0], N_b, replace=False)
 tb_train = t_sol[idx_t,:]
 
 X_f_train = lb_sol + (ub_sol-lb_sol)*lhs(2, N_f)
     
 # Layers
 u_layers = [2, 50, 50, 50, 50, 1]
 pde_layers = [3, 100, 100, 1]
 
 layers = [2, 50, 50, 50, 50, 1]
 
 # Model
 model = DeepHPM(t_train, x_train, u_train,
                 x0_train, u0_train, tb_train, X_f_train,
                 u_layers, pde_layers,
                 layers,
                 lb_idn, ub_idn,
                 lb_sol, ub_sol)
     
Code example #29
        f = 10.0 * (1.0 - x) * np.sin(3.0 * np.pi * (x - 0.01))
        return f

    # Exact solution
    def u(x):
        u = (0.65479E-3+(-0.115342E-1)*x+0.659469E-2*x**2+0.176577E-2*x**3+ \
            (-0.65479E-3)*np.cos(0.942478E1*x)+0.119273E-3*x*np.cos(0.942478E1*x)+ \
            0.121116E-2*np.sin(0.942478E1*x)+(-0.126178E-2)*x*np.sin(0.942478E1*x))
        return u

    # Boundary conditions data
    X_u = np.array([0.0, 0.0, 1.0, 1.0])[:, None]
    y_u = np.array([0.0, 0.0, 0.0, 0.0])[:, None]

    # Forcing training data
    X_f = lb + (ub - lb) * lhs(D, N_f)
    y_f = f(X_f) + noise_f * np.random.randn(N_f, D)

    # Test data
    nn = 500
    X_star = np.linspace(lb, ub, nn)[:, None]
    u_star = u(X_star)
    f_star = f(X_star)

    # Compute required kernels
    k_uu, k_uu1, k_uu2, k_uu3, k_uf, \
    k_u1u1, k_u1u2, k_u1u3, k_u1f, \
    k_u2u2, k_u2u3, k_u2f, \
    k_u3u3, k_u3f, \
    k_ff = Beam_kernels()
Code example #30
def create_new_individuals(design, problem, pop_size=None):
    """Create new individuals to the population.

    The individuals can be created randomly, by LHS design, or can be passed by the
    user.

    Design does not apply in case of EvoNN and EvoDN2 problem, where neural networks
    are created as individuals.

    Parameters
    ----------
    design : str, optional
        Describe the method of creation of new individuals.
        "RandomDesign" creates individuals randomly.
        "LHSDesign" creates individuals using Latin hypercube sampling.
        "EvoNN" creates Artificial Neural Networks as individuals.
        "EvoDN2" creates Deep Neural Networks.
    problem : baseProblem
        An object of the class Problem
    pop_size : int, optional
        Number of individuals in the population. If none, some default population
        size based on number of objectives is chosen.

    Returns
    -------
    individuals : list
        A list of individuals.

    """

    if pop_size is None:
        #pop_size_options = [50, 105, 120, 126, 132, 112, 156, 90, 275]
        pop_size_options = np.ones(problem.num_of_objectives) * 50
        pop_size = pop_size_options[problem.num_of_objectives - 2]

    if design == "RandomDesign":
        lower_limits = np.asarray(problem.get_variable_lower_bounds())
        upper_limits = np.asarray(problem.get_variable_upper_bounds())
        individuals = np.random.random((pop_size, problem.n_of_variables))
        # Scaling
        individuals = individuals * (upper_limits -
                                     lower_limits) + lower_limits

        return individuals

    elif design == "LHSDesign":
        lower_limits = np.asarray(problem.get_variable_lower_bounds())
        upper_limits = np.asarray(problem.get_variable_upper_bounds())
        individuals = lhs(problem.n_of_variables, samples=pop_size)
        # Scaling
        individuals = individuals * (upper_limits -
                                     lower_limits) + lower_limits

        return individuals

    elif design == "EvoNN":
        """Create a population of neural networks for the EvoNN algorithm.

        Individuals are 2d arrays representing the weight matrices of the NNs.
        One extra row is added for bias.

        """

        w_low = problem.params["w_low"]
        w_high = problem.params["w_high"]
        in_nodes = problem.num_of_variables
        num_nodes = problem.params["num_nodes"]
        prob_omit = problem.params["prob_omit"]

        individuals = np.random.uniform(w_low,
                                        w_high,
                                        size=(pop_size, in_nodes, num_nodes))

        # Randomly set some weights to zero
        zeros = np.random.choice(np.arange(individuals.size),
                                 ceil(individuals.size * prob_omit))
        individuals.ravel()[zeros] = 0

        # Set bias
        individuals = np.insert(individuals, 0, 1, axis=1)

        return individuals

    elif design == "EvoDN2":
        """Create a population of deep neural networks (DNNs) for the EvoDN2 algorithm.

        Each individual is a list of subnets, and each subnet contains a random amount
        of layers and
        nodes per layer. The subnets are evolved via evolutionary algorithms, and they
        converge
        on the final linear layer of the DNN.
        """

        individuals = []
        for i in range(problem.params["pop_size"]):
            nets = []
            for j in range(problem.params["num_subnets"]):

                layers = []
                num_layers = np.random.randint(1, problem.params["max_layers"])
                in_nodes = len(problem.subsets[j])

                for k in range(num_layers):
                    out_nodes = random.randint(2, problem.params["max_nodes"])
                    net = np.random.uniform(
                        problem.params["w_low"],
                        problem.params["w_high"],
                        size=(in_nodes, out_nodes),
                    )
                    # Randomly set some weights to zero
                    zeros = np.random.choice(
                        np.arange(net.size),
                        ceil(net.size * problem.params["prob_omit"]),
                    )
                    net.ravel()[zeros] = 0

                    # Add bias
                    net = np.insert(net, 0, 1, axis=0)
                    in_nodes = out_nodes
                    layers.append(net)

                nets.append(layers)

            individuals.append(nets)

        return individuals

    elif design == "BioGP":
        return problem.create_individuals()
Code example #31
File: models.py  Project: YangyangFu/MPCPy
    def estimate(self, start_time, final_time, measurement_variable_list, global_start=0, seed=None, use_initial_values=True):
        '''Estimate the parameters of the model.
        
        The estimation of the parameters is based on the data in the 
        ``'Measured'`` key in the measurements dictionary attribute, 
        the parameter_data dictionary attribute, and any exodata inputs.
        
        An optional global start algorithm is available, in which multiple 
        estimations are performed with different initial guesses within the 
        ranges of each free parameter provided.  It is implemented as tested in 
        Blum et al. (2019).  The algorithm uses latin hypercube sampling 
        to choose the initial parameter guess values for each iteration, and 
        the iteration with the lowest estimation problem objective value is 
        chosen.  A user-provided guess is included by default using the initial
        values given in the parameter data of the model, though this option can be 
        turned off to use only sampled initial guesses.
        
        Blum, D.H., Arendt, K., Rivalin, L., Piette, M.A., Wetter, M., and 
        Veje, C.T. (2019). "Practical factors of envelope model setup and 
        their effects on the performance of model predictive control for 
        building heating, ventilating, and air conditioning systems." 
        Applied Energy 236, 410-425. 
        https://doi.org/10.1016/j.apenergy.2018.11.093
        
        Parameters
        ----------
        start_time : string
            Start time of estimation period.
            Setting to 'continue' will result in error.
        final_time : string
            Final time of estimation period.
        measurement_variable_list : list
            List of strings defining for which variables defined in the 
            measurements dictionary attribute the estimation will 
            try to minimize the error.
        global_start : int, optional
            Number of iterations of a global start algorithm.
            If 0, the global start algorithm is disabled and the values in
            the parameter_data dictionary are used as initial guesses.
            Default is 0.
        seed : numeric or None, optional
            Specific seed of the global start algorithm for the random selection
            of initial value guesses.
            Default is None.
        use_initial_values : boolean, optional
            True to include initial parameter values in the estimation iterations.
            Default is True.

        Yields
        ------
        Updates the ``'Value'`` key for each estimated parameter in the 
        parameter_data attribute.

        '''
        
        # Check for free parameters
        free = False;
        for key in self.parameter_data.keys():
            if self.parameter_data[key]['Free'].get_base_data():
                free = True;
                break
            else:
                free = False;
        if not free:
            # If none free raise error
            raise ValueError('No parameters set as "Free" in parameter_data dictionary. Cannot run parameter estimation.');
        # Check for measurements
        for meas in measurement_variable_list:
            if meas not in self.measurements.keys():
                raise ValueError('Measurement {0} defined in measurement_variable_list not defined in measurements dictionary.'.format(meas))
        # Check for continue
        if start_time == 'continue':
            raise ValueError('"continue" is not a valid entry for start_time for parameter estimation problems.')
        # Perform parameter estimation
        self._set_time_interval(start_time, final_time);
        self.measurement_variable_list = measurement_variable_list;
        # Without global start
        if not global_start:
            self._estimate_method._estimate(self);
        # With global start
        else:
            # Detect free parameters
            free_pars = [];
            for par in self.parameter_data.keys():
                if self.parameter_data[par]['Free'].display_data():
                    free_pars.append(par)
            # Create lhs sample for all parameters
            np.random.seed(seed)  # Random seed for LHS initialization
            n_free_pars = len(free_pars);
            lhs = doe.lhs(n_free_pars, samples=global_start, criterion='c');
            # Scale and store lhs samples for parameters between min and max bounds
            par_vals = dict();
            for par, i in zip(free_pars, range(n_free_pars)):
                par_min = self.parameter_data[par]['Minimum'].display_data();
                par_max = self.parameter_data[par]['Maximum'].display_data();                                
                par_vals[par] = (lhs[:,i]*(par_max-par_min)+par_min).tolist();
                # Add initial value guesses if wanted
                if use_initial_values:
                    par_vals[par].append(self.parameter_data[par]['Value'].display_data())
            # Estimate for each sample
            J = float('inf');
            par_best = dict();
            glo_est_data = dict()
            if use_initial_values:
                iterations = range(global_start+1)
            else:
                iterations = range(global_start)
            for i in iterations:
                # Create dictionary to save all estimation iteration data
                glo_est_data[i] = dict()
                # Set lhs sample values for each parameter
                for par in par_vals.keys():
                    # Use latin hypercube selections
                    self.parameter_data[par]['Value'].set_data(par_vals[par][i]);
                    glo_est_data[i][par] = par_vals[par][i]
                # Make estimate for iteration
                self._estimate_method._estimate(self);
                # Validate estimate for iteration
                self.validate(start_time, final_time, 'validate', plot = 0);
                # Save RMSE for initial_guess
                for key in self.RMSE:
                    glo_est_data[i]['RMSE_{0}'.format(key)] = self.RMSE[key].display_data();
                # If solve succeeded, compare objective and if less, save best par values
                solver_message = self._estimate_method.opt_problem.get_optimization_statistics()[0]
                J_curr = self._estimate_method.opt_problem.get_optimization_statistics()[2]
                glo_est_data[i]['Message'] = solver_message
                glo_est_data[i]['J'] = J_curr
                if ((J_curr < J) and (J_curr > 0.0)) or ((J_curr < J) and (solver_message == 'Solve_Succeeded')):
                    J = J_curr;
                    for par in free_pars:
                        par_best[par] = self.parameter_data[par]['Value'].display_data();
            # Save all estimates
            glo_est_data['J_Best'] = J
            self.glo_est_data = glo_est_data
            # Set best parameters in model if found
            if par_best:
                for par in par_vals.keys():
                    self.parameter_data[par]['Value'].set_data(par_best[par]);
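The global-start logic above amounts to: draw one centred LHS sample per free parameter, scale each column to that parameter's [Minimum, Maximum] range, and use each row as an initial guess. A stripped-down sketch of that selection step; the parameter names and bounds are invented for illustration:

from pyDOE import lhs as doe_lhs

free_pars = {'R': (0.001, 0.1), 'C': (1e4, 1e6)}   # name -> (min, max)
global_start = 5

unit = doe_lhs(len(free_pars), samples=global_start, criterion='c')
initial_guesses = {
    par: (unit[:, i] * (hi - lo) + lo).tolist()
    for i, (par, (lo, hi)) in enumerate(free_pars.items())
}
# initial_guesses['R'][k] is the k-th starting value tried for parameter 'R'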
Code example #32
 def _generate(self, n_dimensions: int, n_points: int) -> np.ndarray:
     return pyDOE.lhs(n_dimensions, samples=n_points)
Code example #33
File: Q1.py  Project: akhibhat/ENM531
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 16 19:56:37 2019

@author: akhil
"""

import numpy as np
from pyDOE import lhs
#import torch

N = 500

x = 50 + 4*lhs(1,N)
y = 50 + 4*lhs(1,N)
xstar = (x-np.mean(x))/np.std(x)
ystar = (y-np.mean(y))/np.std(y)

fxy = np.cos(np.pi*xstar)*np.cos(np.pi*ystar)

layers = 2
dim = 2
nsperlayer = 50

def generate_params_layer1():
    inputs = 2
    weights = np.random.normal(0,2/(inputs + nsperlayer),(dim,nsperlayer))
    
    bias1 = np.random.random_sample((1,nsperlayer))
    bias = np.repeat(bias1,N,axis = 0)
    return weights, bias
Code example #34
TXY_kesi_H_train=np.hstack((TXY_kesi_train,H_train))
np.random.shuffle(TXY_kesi_H_train)

TXY_kesi_train=TXY_kesi_H_train[:,0:23]
H_train=TXY_kesi_H_train[:,23:24]



############################################################################
# extract collocation-point data
Nf=1000000

# sample points randomly
lb=np.array([0,x.min(),y.min()])
ub=np.array([t.max(),x.max(),y.max()])
TXY_f_train=lb + (ub-lb)*lhs(3, Nf)
TXY_kesi_f=np.hstack((TXY_f_train,np.zeros((Nf,n_eigen))))
kesi_f=np.random.randn(Nf,n_eigen)   # array of random numbers

TXY_kesi_f[:,3:(n_eigen+3)]=kesi_f

np.random.shuffle(TXY_kesi_f)
n_colloc=TXY_kesi_f.shape[0]


############################################################################
# extract boundary data
N_boun1=100000

X_boun_col=x[0]*np.ones((N_boun1,1))
Y_boun_col=y[0]+(y[50]-y[0])*lhs(1, N_boun1)
Code example #35
File: testing.py  Project: walter-git/Pysd
def sample_pspace(model, param_list=None, bounds=None, samples=100, seed=None):
    """
    Return a DataFrame where each row represents a location in the parameter
    space, with locations distributed to exercise the full range of values
    that each parameter can take on.

    This is useful for quick and dirty application of tests to a bunch
    of locations in the sample space. Kind-of a fuzz-testing for
    the model.

    Uses latin hypercube sampling, with random values within
    the sample bins. The LHS sampler shuffles the bins each time,
    so a subsequent call will yield a different sample from the
    parameter space.

    When a variable has both upper and lower bounds, use a uniform
    sample between those bounds.

    When a variable has only one bound, use an exponential distribution
    with the scale set to be the difference between the bound and the
    current model value (1 if they are the same)

    When the variable has neither bound, use a normal distribution centered
    on the current model value, with scale equal to the absolute value
    of the model value (1 if that magnitude is 0)

    Parameters
    ----------
    model: pysd.Model object

    param_list: None or list of strings
        The real names of parameters to include in the explored parameter
        space.
        If None, uses all of the constants in the model except TIME STEP,
        INITIAL TIME, etc.

    bounds: DataFrame, string filename, or None
        A range test matrix as used for bounds checking.
        If None, creates one from the model
        These bounds can also place artificial limits on the
        parameter space you want to explore, even if the theoretical
        bounds on the variable are infinite.

    samples: int
        How many samples to include in the iterator?

    Returns
    -------
    lhs : pandas DataFrame
        distribution-weighted latin hypercube samples

    Note
    ----
    Executes the model by 1 time-step to get the current value of parameters.

    """
    if param_list is None:
        doc = model.doc()
        param_list = sorted(
            list(
                set(doc[doc['Type'] == 'constant']['Real Name']) -
                {'FINAL TIME', 'INITIAL TIME', 'TIME STEP', 'TIME STEP'}))

    if isinstance(bounds, _pd.DataFrame):
        bounds = bounds.set_index('Real Name')
    elif bounds is None:
        bounds = create_bounds_test_matrix(model).set_index('Real Name')
    elif isinstance(bounds, str):
        if bounds.split('.')[-1] in ['xls', 'xlsx']:
            bounds = _pd.read_excel(bounds,
                                    sheet_name='Bounds',
                                    index_col='Real Name')
        elif bounds.split('.')[-1] == 'csv':
            bounds = _pd.read_csv(bounds,
                                  index_col='Real Name',
                                  encoding='UTF-8')
        elif bounds.split('.')[-1] == 'tab':
            bounds = _pd.read_csv(bounds,
                                  sep='\t',
                                  index_col='Real Name',
                                  encoding='UTF-8')
        else:
            raise ValueError('Unknown file type: bounds')
    else:
        raise ValueError('Unknown type: bounds')

    if seed is not None:
        _np.random.seed(seed)

    unit_lhs = _pd.DataFrame(_pyDOE.lhs(n=len(param_list), samples=samples),
                             columns=param_list)  # raw latin hypercube sample

    res = model.run(return_timestamps=[model.components.initial_time()])
    lhs = _pd.DataFrame(index=unit_lhs.index)
    for param in param_list:
        lower, upper = bounds[['Min', 'Max']].loc[param]
        value = res[param].iloc[0]

        if lower == upper:
            lhs[param] = lower

        elif _np.isfinite(lower) and _np.isfinite(
                upper):  # np.isfinite(0)==True
            scale = upper - lower
            lhs[param] = _dist.uniform(lower, scale).ppf(unit_lhs[param])

        elif _np.isfinite(lower) and _np.isinf(upper):
            if lower == value:
                scale = 1
            else:
                scale = value - lower
            lhs[param] = _dist.expon(lower, scale).ppf(unit_lhs[param])

        elif _np.isinf(lower) and _np.isfinite(
                upper):  # np.isinf(-np.inf)==True
            if upper == value:
                scale = 1
            else:
                scale = upper - value
            lhs[param] = upper - _dist.expon(0, scale).ppf(unit_lhs[param])

        elif _np.isinf(lower) and _np.isinf(upper):  # np.isinf(-np.inf)==True
            if value == 0:
                scale = 1
            else:
                scale = abs(value)
            lhs[param] = _dist.norm(value, scale).ppf(unit_lhs[param])

        else:
            raise ValueError('Problem with lower: %s or upper: %s bounds' %
                             (lower, upper))

    return lhs
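The bound handling above maps each unit-LHS column through a distribution chosen per parameter: uniform when both bounds are finite, exponential when only one is, normal when neither is. A compact sketch of that dispatch for a single column; the bounds and current value are placeholders:

import numpy as np
from scipy import stats as dist
from pyDOE import lhs

unit_col = lhs(1, samples=10)[:, 0]       # one parameter's unit-interval samples
lower, upper, value = 0.0, np.inf, 3.0    # example: only a lower bound

if np.isfinite(lower) and np.isfinite(upper):
    col = dist.uniform(lower, upper - lower).ppf(unit_col)
elif np.isfinite(lower):
    scale = (value - lower) or 1          # fall back to 1 if value == lower
    col = dist.expon(lower, scale).ppf(unit_col)
elif np.isfinite(upper):
    scale = (upper - value) or 1
    col = upper - dist.expon(0, scale).ppf(unit_col)
else:
    col = dist.norm(value, abs(value) or 1).ppf(unit_col)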
Code example #36
    ub = 1.0 * np.ones(D)
    noise_f = 0.01

    # Forcing term
    def f(x):
        f = -(np.pi**2) * np.sin(np.pi * x) - 5 * np.sqrt(2) * np.sin(
            np.pi * x)
        return f

    # Exact solution
    def u(x):
        u = np.sin(np.pi * x)
        return u

    # Boundary conditions data
    X_u = lb + (ub - lb) * lhs(D, N_u)
    y_u = u(X_u)

    # Forcing training data
    X_f = lb + (ub - lb) * lhs(D, N_f)
    y_f = f(X_f) + noise_f * np.random.randn(N_f, D)

    # Test data
    nn = 500
    X_star = np.linspace(lb, ub, nn)
    u_star = u(X_star)
    f_star = f(X_star)

    # Compute required kernels
    k_uu, k_uf, k_ff = Helmholtz_kernels()
Code example #37
        # flux = 0 bc
        X_l = np.hstack((xx[:, 0].flatten()[:,None],
                              tt[:, 0].flatten()[:,None]))
        X_r = np.hstack((xx[:, -1].flatten()[:,None], 
                              tt[:, -1].flatten()[:,None]))
        # Domain bounds
        lb = X_star.min(0)
        ub = X_star.max(0)    
            
        # Measurement data
        X_U_meas = X_star
        U_meas = U_star
        
        # Collocation points
        N_f = 10000    
        X_f_train = lb + (ub-lb)*lhs(X_U_meas.shape[1], N_f)
        
        X_f_train = np.vstack((X_f_train, X_U_meas))
                        
# =============================================================================
#         train model
# =============================================================================
        model = PhysicsInformedNN(X_U_meas, U_meas, X_f_train, X_l, X_r, layers, lb, ub, scale_factor)
        model.train() 
        
# =============================================================================
#         result
# =============================================================================
        f = open("stdout_ADO.txt", "a+")  
        elapsed = time.time() - start_time                
        f.write('Training time: %.4f \n' % (elapsed))
Code example #38
File: test_basicSampling.py  Project: leiyangcq/BET
def verify_create_random_discretization(model, sampler, sample_type, input_domain,
        num_samples, savefile):

    np.random.seed(1)
    # recreate the samples
    if num_samples is None:
        num_samples = sampler.num_samples
    
    input_sample_set = sample_set(input_domain.shape[0])
    input_sample_set.set_domain(input_domain)
    
    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)
    
    input_values = (input_right-input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                num_samples, 'center') 
    elif sample_type == "random" or "r":
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    input_sample_set.set_values(input_values)
    
    # evaluate the model at the samples directly
    output_values = (model(input_sample_set._values))
    if len(output_values.shape) == 1:
        output_sample_set = sample_set(1)
    else:
        output_sample_set = sample_set(output_values.shape[1])
    output_sample_set.set_values(output_values)

    # reset the random seed
    np.random.seed(1)
    comm.barrier()
    # create the random discretization using a specified input domain
    my_discretization = sampler.create_random_discretization(sample_type,
            input_domain, savefile, num_samples=num_samples, globalize=True)
    #comm.barrier()
    my_num = my_discretization.check_nums() 
    
    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 0:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
            my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
            my_discretization._output_sample_set._values)

    # did num_samples get updated?
    assert my_num == sampler.num_samples
    
    # did the file get correctly saved?
    saved_disc = bet.sample.load_discretization(savefile)
    
    # compare the samples
    nptest.assert_array_equal(my_discretization._input_sample_set.get_values(),
        saved_disc._input_sample_set.get_values())
    # compare the data
    nptest.assert_array_equal(my_discretization._output_sample_set.get_values(),
       saved_disc._output_sample_set.get_values())

    # reset the random seed
    np.random.seed(1)

    my_sample_set = sample_set(input_domain.shape[0])
    my_sample_set.set_domain(input_domain)
    #comm.barrier()
    # create the random discretization using an initialized sample_set
    my_discretization = sampler.create_random_discretization(sample_type,
                my_sample_set, savefile, num_samples=num_samples,
                globalize=True)
    my_num = my_discretization.check_nums()

    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 0:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
                              my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
                              my_discretization._output_sample_set._values)

    # reset the random seed
    np.random.seed(1)
    # recreate the samples to test default choices with unit hypercube domain
    if num_samples is None:
        num_samples = sampler.num_samples

    my_dim = input_domain.shape[0]
    input_sample_set = sample_set(my_dim)
    input_sample_set.set_domain(np.repeat([[0.0, 1.0]], my_dim, axis=0))

    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)

    input_values = (input_right - input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                                                num_samples, 'center')
    elif sample_type in ("random", "r"):
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    input_sample_set.set_values(input_values)

    # reset random seed
    np.random.seed(1)
    comm.barrier()
    # create the random discretization using a specified input_dim
    my_discretization = sampler.create_random_discretization(sample_type,
            my_dim, savefile, num_samples=num_samples, globalize=True)
    #comm.barrier()
    my_num = my_discretization.check_nums()

    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 0:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
                              my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
                              my_discretization._output_sample_set._values)
Code example #39
File: kriging_test.py Project: jomorlier/pyEGO
import numpy as np
from pyDOE import lhs
from kriging import kriging
from true_function import true_function
from ga import ga
from exp_imp import EGO

k = 2
n = 5*2

# sampling plan
X = lhs(k, samples=n)
y = np.zeros((n, 1))

# find true values
for i in range(n):  # evaluate the true function at each of the n sample points
    y[i] = true_function(X[i], 1)

# create kriging model
kr = kriging(k, X, y)

# train model
kr.train()

# plot prediction
kr.plot_2d()

E = EGO(kr)
MinExpImp = 1e14
infill = 0
Code example #40
    parser.add_argument('--num_dim', type=int, required=True)
    parser.add_argument('--num_parallels', type=int, required=True)
    parser.add_argument('--num_montecarlo', type=int, required=True)
    parser.add_argument('--num_speculative_iter', type=int, required=True)
    parser.add_argument('--max_gp_samples', type=int, required=True)
    args = parser.parse_args()

    # set config
    num_dim = args.num_dim
    num_parallels = args.num_parallels
    num_montecarlo = args.num_montecarlo
    num_speculative_iter = args.num_speculative_iter
    max_gp_samples = args.max_gp_samples

    # generate initial simplex using Latin hypercube sampling
    initial_simplex = 10 * lhs(num_dim, samples=num_dim + 1) - 5

    # optimize
    nm = NelderMead(
        benchmark,
        speculative_exec=True,
        num_montecarlo=num_montecarlo,
        num_speculative_iter=num_speculative_iter,
        num_parallels=num_parallels,
        max_gp_samples=max_gp_samples)
    x, fx, k = nm.optimize(initial_simplex, min_diam=1e-4)  # the results of NM (x, fx, # of iters)
    steps = nm.f.count  # # of eval steps
    evals = len(nm.f.keys)  # # of evaluations

    # the result of NM method
    print('x:\t%s' % (x))
Code example #41
# imports assumed by this snippet; GPRegression comes from the example's own
# (unshown) module
import numpy as np
from pyDOE import lhs

np.random.seed(1234)

if __name__ == "__main__":

    N = 12
    D = 1
    lb = -1.0 * np.ones(D)
    ub = 2.0 * np.ones(D)
    noise = 0.0

    def f(x):
        return x * np.sin(np.pi * x)

    # Training data
    X = lb + (ub - lb) * lhs(D, N)
    y = f(X) + noise * np.random.randn(N, D)

    # Test data
    nn = 200
    X_star = np.linspace(lb, ub, nn)[:, None]
    y_star = f(X_star)

    # Define model
    model = GPRegression(X, y)

    # Train
    model.train()

    # Predict
    y_pred, y_var = model.predict(X_star)
Code example #42
    def create_macros_LHS(
        self,
        number_of_macros=None,
        criterion=None,  # noqa
        iterations=None,
        batch_size=None,
        append_default=False,
    ):
        """Generate AnyScript macros for Latin Hyper Cube Studies studies.

        Generates AnyScript macros for parameter studies using Latin hypercube
        sampling. The macros added with `SetValue_random` are created with
        Latin hypercube sampling (LHS) of the parameter space. The number
        of generated macros determines the number of LHS samples.

        The method uses the pyDOE package to generate the LHS data.


        Parameters
        ----------
        number_of_macros : int (Optional)
            The number of macros to create.
        batch_size : int (Optional)
            If specified the function will return a generator which creates macros in
            batches.
        criterion : {None, 'c', 'm', 'cm', 'corr'}
            A string that specifies how points are sampled
            (see: http://pythonhosted.org/pyDOE/randomized.html)
            `None` (default): points are randomized within the intervals.
            "center" or "c": center the points within the sampling intervals.
            "maximin" or "m": maximize the minimum distance between points, but place
            the point in a randomized location within its interval.
            "centermaximin" or "cm": same as "maximin", but centered within the intervals.
            "corr": minimize the maximum correlation coefficient.
        iterations : int
            Specifies how many iterations are used to accomplish the selected criterion.
        append_default : bool
            If True, a macro with default (mean) values is appended to the returned
            list of macros.


        Returns
        -------
        list or generator
            A list of macros, or a generator which creates macros in batches.

        Examples
        --------
        >>> np.random.seed(1)
        >>> from scipy.stats.distributions import logistic, norm
        >>> log_dist = logistic( loc= [1,3,4],scale = [0.1,0.5,1] )
        >>> norm_dist = norm( loc= [0,0,0],scale = [0.1,0.5,1] )
        >>> cmd = [SetValue_random('Main.MyVar1', log_dist), SetValue_random('Main.MyVar2', norm_dist) ]
        >>> mg = AnyMacro(cmd, number_of_macros = 3)
        >>> mg.create_macros_LHS()
        [['classoperation Main.MyVar1 "Set Value" --value="{0.0928967116493,-0.591418725401,-0.484696993931}"',
        'classoperation Main.MyVar2 "Set Value" --value="{1.01049414425,2.46211329129,5.73806916203}"'],
        ['classoperation Main.MyVar1 "Set Value" --value="{-0.0166741228961,0.707722119582,-0.294180629253}"',
        'classoperation Main.MyVar2 "Set Value" --value="{1.11326829265,2.66016732923,4.28054911097}"'],
        ['classoperation Main.MyVar1 "Set Value" --value="{-0.20265197275,0.114947152258,0.924796936287}"',
        'classoperation Main.MyVar2 "Set Value" --value="{0.806864877696,4.4114188826,2.93941843565}"']]

        """  # noqa
        try:
            import pyDOE
        except ImportError:
            raise ImportError("The pyDOE package must be install to use this class")

        if batch_size is not None:
            return _batch(
                self.create_macros_LHS(number_of_macros, criterion, iterations),
                n=batch_size,
            )

        if number_of_macros is None:
            number_of_macros = self.number_of_macros

        if self.seed is not None:
            np.random.seed(self.seed)

        factors = sum([e.n_factors for e in self if isinstance(e, SetValue_random)])
        lhs_matrix = pyDOE.lhs(
            factors, number_of_macros, criterion=criterion, iterations=iterations
        )

        macro_list = []

        for macro_idx in range(number_of_macros):
            macro = []
            lhs_idx = 0
            for elem in self:
                if isinstance(elem, SetValue_random):
                    lhs_val = lhs_matrix[macro_idx, lhs_idx : lhs_idx + elem.n_factors]
                    mcr = elem.get_macro(macro_idx, lower_tail_probability=lhs_val)
                    lhs_idx += elem.n_factors
                else:
                    mcr = elem.get_macro(macro_idx)

                if self.counter_token:
                    mcr = mcr.replace(self.counter_token, str(macro_idx))
                if len(mcr) > 0:
                    macro.extend(mcr.split("\n"))
            macro_list.append(macro)
        if append_default:
            macro = []
            for elem in self:
                mcr = elem.get_macro(number_of_macros)
                if self.counter_token:
                    mcr = mcr.replace(self.counter_token, str(number_of_macros))
                if len(mcr) > 0:
                    macro.extend(mcr.split("\n"))
            macro_list.append(macro)
        return macro_list
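
The criterion strings described in the docstring above are passed straight through to pyDOE.lhs. A minimal standalone sketch of that call (the variable names here are illustrative and not part of the class):

import numpy as np
import pyDOE

np.random.seed(1)
# Three factors, five samples; the criterion changes how the unit hypercube is
# filled before the values are mapped onto the parameter distributions.
plain = pyDOE.lhs(3, samples=5)                         # randomized within intervals
centered = pyDOE.lhs(3, samples=5, criterion="center")  # centered within intervals
spread = pyDOE.lhs(3, samples=5, criterion="maximin", iterations=20)  # maximize min distance
print(plain.shape, centered.shape, spread.shape)        # (5, 3) for each design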
Code example #43
@author: Paris
"""

import numpy as np
import matplotlib.pyplot as plt
from pyDOE import lhs

if __name__ == "__main__":

    # N is the number of training points.
    # D_in is input dimension
    # D_out is output dimension.
    N, D_in, D_out = 64, 1, 1

    # Create random input and output data
    X = lhs(D_in, N)
    y = 5 * X + np.random.randn(N, D_out)

    # Randomly initialize weights
    w = np.random.randn(D_in, D_out)

    learning_rate = 1e-3
    for it in range(10000):
        # Forward pass: compute predicted y
        y_pred = np.matmul(X, w)

        # Compute and print loss
        loss = np.sum((y_pred - y)**2)
        print("Iteration: %d, loss: %f" % (it, loss))

        # Backprop to compute gradients of W with respect to loss
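        # A plausible completion of the missing gradient step for this
        # sum-of-squares loss (a sketch, not the original file's code):
        grad_w = np.matmul(X.T, 2.0 * (y_pred - y))  # d(loss)/dw
        w = w - learning_rate * grad_w               # gradient-descent update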
Code example #44
File: LHS.py Project: haghakhani/UQ_workshop
# imports assumed by this snippet (the aliases are inferred from how they are used below)
import os
import time
import shutil as st
import subprocess as sprcs

import numpy as np
import pyDOE as pylhs


def LHS_UQ(num_sample, min1, range1, min2, range2):
   
    # number of random dimensions
    randoms=2
    
    lhd = pylhs.lhs(randoms, num_sample)
    lhs=np.zeros((num_sample,randoms))
    lhs[:,0]=lhd[:,0]*range1+min1
    lhs[:,1]=lhd[:,1]*range2+min2
    
    fp = open(os.path.join(os.getcwd(),'puffin/puffin.inp'),'r')
    simdata = fp.read()
    fp.close()
    
    
    water_w=0.33
    temp=1111
    
    dircs_name='LHS'
    
    # delete the folders if they already exist
    for i in range(0,num_sample): 
        dirname=os.path.join(os.getcwd(),dircs_name+str(i))
        if (os.path.isdir(dirname)):
            st.rmtree(os.path.join(os.getcwd(),dircs_name+str(i))) 
    
    # create a folder for each simulation
    for i in range(0,num_sample): 
        st.copytree('puffin',os.path.join(os.getcwd(),dircs_name+str(i))) 
    
    # replace the initial values in the input file with the sampled values
    for i in range(0,num_sample):        
#        print lhs[i,:]
        rep_water_w="{:.4f}".format(lhs[i,0])
        rp_simdata = simdata.replace(str(water_w),rep_water_w)
        rep_temp="{:.0f}".format(lhs[i,1])
        rp_simdata = rp_simdata.replace(str(temp), rep_temp)      
        fp = open(os.path.join(os.getcwd(),dircs_name+str(i)+'/puffin.inp'),'w')
        fp.write(rp_simdata)
        fp.flush()
        fp.close()
#        print i
    
    # runs the code for each sample and generates the figures
    for i in range(0,num_sample):
        path=dircs_name+str(i)
        os.chdir(path)    
        os.system('./puffin>output')
        os.system('gnuplot field.gnu')
        image = sprcs.Popen(["display", "./conc.jpg"])
        time.sleep(1)
        image.kill()    
        os.chdir('..')
#        print i
        
    particle_flux=np.zeros(num_sample)
    eruption_height=np.zeros(num_sample)
    
    # reading the result of simulations from the output file    
    for i in range(0,num_sample): 
        dirname=os.path.join(os.getcwd(),dircs_name+str(i))
        fp = open(os.path.join(dirname,'output'),'r')
        for line in fp:
            if line.find("PARTICLE FLUX AT HB")>-1:
                info = line.split()
                particle_flux[i] = float(info[4])
            if line.find("ERUPTION PLUME HEIGHT")>-1:
                info = line.split()
                eruption_height[i]=float(info[3]) 
                
    h_mean=sum(eruption_height)/num_sample  
    h_std=np.sqrt(sum(np.square(eruption_height))/num_sample-h_mean*h_mean) 
    
    particle_flux_mean=sum(particle_flux)/num_sample  
    particle_flux_std=np.sqrt(sum(np.square(particle_flux))/num_sample-particle_flux_mean*particle_flux_mean)
    
    output=np.vstack((lhs[:,0],lhs[:,1],eruption_height,particle_flux)).T
    
    np.savetxt("lhs_output.csv", output, delimiter=",") 
    
    print ("Results of the experiment:")
    print ("eruption height for the samples:   ",eruption_height)
    print ("mean of height:                    ",h_mean)
    print ("standard deviation of height:      ",h_std)
    print ("particle flux for the samples:     ",particle_flux)
    print ("mean of particle flux:               %e"%particle_flux_mean)
    print ("standard deviation of particle flux: %e" %particle_flux_std)
Code example #45
        f_u_star = self.sess.run(self.f_u_pred, tf_dict)

        return u_star, f_u_star


if __name__ == "__main__":
    N_b = 800  # number of boundary points
    N_f = 1000  # number of collocation points

    layers = [2, 20, 20, 20, 1]  #number of neurons in each layer
    num_train_its = 1000  #number of training iterations

    layers = [2, 200, 200, 200, 1]  # overrides the smaller network defined above
    LB_Plate = np.array([0.0, 0.0])
    UB_Plate = np.array([2.0, 1.0])
    X_f = LB_Plate + (UB_Plate - LB_Plate) * lhs(
        2, N_f)  #Generating collocation points

    # define model parameters
    m = 1  # mode number
    k = 4  # wave number

    #solve for the constants A1 and A2 in the exact solution
    kx = np.sqrt(k**2 - (m * np.pi)**2)
    alpha = -k**2

    LHS = np.array([[1j * kx, -1j * kx],
                    [(k - kx) * np.exp(-2 * 1j * kx),
                     (k + kx) * np.exp(2 * 1j * kx)]])
    RHS = np.array([[1], [0]])
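    # The snippet ends before this 2x2 system is actually solved; presumably the
    # constants A1 and A2 come from a linear solve along these lines (a sketch,
    # not the original file's code):
    A = np.linalg.solve(LHS, RHS)  # A[0] = A1, A[1] = A2 in the exact solution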
Code example #46
File: testing.py Project: JamesPHoughton/pysd
def sample_pspace(model, param_list=None, bounds=None, samples=100, seed=None):
    """
    Return a DataFrame where each row represents a location in the parameter
    space, with locations distributed to exercise the full range of values
    that each parameter can take on.

    This is useful for quick-and-dirty application of tests to a set of
    locations in the sample space; think of it as a kind of fuzz testing
    for the model.

    Uses latin hypercube sampling, with random values within
    the sample bins. The LHS sampler shuffles the bins each time,
    so a subsequent call will yield a different sample from the
    parameter space.

    When a variable has both upper and lower bounds, use a uniform
    sample between those bounds.

    When a variable has only one bound, use an exponential distribution
    with the scale set to be the difference between the bound and the
    current model value (1 if they are the same)

    When the variable has neither bound, use a normal distribution centered
    on the current model value, with scale equal to the absolute value
    of the model value (1 if that magnitude is 0)

    Parameters
    ----------
    model: pysd.Model object

    param_list: None or list of strings
        The real names of parameters to include in the explored parameter
        space.
        If None, uses all of the constants in the model except TIME STEP,
        INITIAL TIME, etc.

    bounds: DataFrame, string filename, or None
        A range test matrix as used for bounds checking.
        If None, creates one from the model
        These bounds can also place artificial limits on the
        parameter space you want to explore, even if the theoretical
        bounds on the variable are infinite.

    samples: int
        The number of sample locations (rows) to include in the returned DataFrame.

    seed: int or None
        Seed for numpy's random number generator, for reproducible sampling.

    Returns
    -------
    lhs : pandas DataFrame
        distribution-weighted latin hypercube samples

    Note
    ----
    Executes the model by 1 time-step to get the current value of parameters.

    """
    if param_list is None:
        doc = model.doc()
        param_list = sorted(list(set(doc[doc['Type'] == 'constant']['Real Name']) -
                            {'FINAL TIME', 'INITIAL TIME', 'TIME STEP'}))

    if isinstance(bounds, _pd.DataFrame):
        bounds = bounds.set_index('Real Name')
    elif bounds is None:
        bounds = create_bounds_test_matrix(model).set_index('Real Name')
    elif isinstance(bounds, str):
        if bounds.split('.')[-1] in ['xls', 'xlsx']:
            bounds = _pd.read_excel(bounds, sheetname='Bounds', index_col='Real Name')
        elif bounds.split('.')[-1] == 'csv':
            bounds = _pd.read_csv(bounds, index_col='Real Name', encoding='UTF-8')
        elif bounds.split('.')[-1] == 'tab':
            bounds = _pd.read_csv(bounds, sep='\t', index_col='Real Name', encoding='UTF-8')
        else:
            raise ValueError('Unknown file type: bounds')
    else:
        raise ValueError('Unknown type: bounds')

    if seed is not None:
        _np.random.seed(seed)

    unit_lhs = _pd.DataFrame(_pyDOE.lhs(n=len(param_list), samples=samples),
                             columns=param_list)  # raw latin hypercube sample

    res = model.run(return_timestamps=[model.components.initial_time()])
    lhs = _pd.DataFrame(index=unit_lhs.index)
    for param in param_list:
        lower, upper = bounds[['Min', 'Max']].loc[param]
        value = res[param].iloc[0]

        if lower == upper:
            lhs[param] = lower

        elif _np.isfinite(lower) and _np.isfinite(upper):  # np.isfinite(0)==True
            scale = upper - lower
            lhs[param] = _dist.uniform(lower, scale).ppf(unit_lhs[param])

        elif _np.isfinite(lower) and _np.isinf(upper):
            if lower == value:
                scale = 1
            else:
                scale = value - lower
            lhs[param] = _dist.expon(lower, scale).ppf(unit_lhs[param])

        elif _np.isinf(lower) and _np.isfinite(upper):  # np.isinf(-np.inf)==True
            if upper == value:
                scale = 1
            else:
                scale = upper - value
            lhs[param] = upper - _dist.expon(0, scale).ppf(unit_lhs[param])

        elif _np.isinf(lower) and _np.isinf(upper):  # np.isinf(-np.inf)==True
            if value == 0:
                scale = 1
            else:
                scale = abs(value)
            lhs[param] = _dist.norm(value, scale).ppf(unit_lhs[param])

        else:
            raise ValueError('Problem with lower: %s or upper: %s bounds' % (lower, upper))

    return lhs
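
A standalone sketch of the bound-to-distribution mapping described in the docstring above, detached from any pysd model; the bounds and column layout here are made up for illustration:

import numpy as np
import pyDOE
from scipy.stats import distributions as dist

np.random.seed(0)
unit_lhs = pyDOE.lhs(3, samples=5)  # unit-hypercube LHS, one column per parameter

# both bounds finite   -> uniform between them
p_uniform = dist.uniform(0.0, 10.0).ppf(unit_lhs[:, 0])
# only a lower bound   -> exponential above it (loc=lower, scale=value-lower)
p_expon = dist.expon(2.0, 1.5).ppf(unit_lhs[:, 1])
# neither bound finite -> normal centred on the current value
p_norm = dist.norm(0.0, 1.0).ppf(unit_lhs[:, 2])

print(np.column_stack([p_uniform, p_expon, p_norm]))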
Code example #47
File: Allen_Cahn_test.py Project: gitvicky/NPDE_TF1
X_lb = np.hstack((T[:, 0:1], X[:, 0:1]))  # lower-boundary coordinates (x = -1, t = 0...0.99)
u_lb = Exact[:, 0:1]   # boundary values of the field u at x = -1
X_ub = np.hstack((T[:, -1:], X[:, -1:]))  # upper-boundary coordinates (x = 1, t = 0...0.99)
u_ub = Exact[:, -1:]   # boundary values of the field u at x = 1

X_b = np.vstack((X_lb, X_ub))
u_b = np.vstack((u_lb, u_ub))

X_f = lb + (ub - lb) * lhs(2, N_f)  # collocation points generated using LHS

idx = np.random.choice(X_i.shape[0], N_i, replace=False)
X_i = X_i[idx, :]  # randomly extract N_i of the x and t values
u_i = u_i[idx, :]  # extract the corresponding N_i field values

idx = np.random.choice(X_b.shape[0], N_b, replace=False)
X_b = X_b[idx, :]  # randomly extract N_b of the boundary x and t values
u_b = u_b[idx, :]  # extract the corresponding N_b field values

training_data = {'X_i': X_i, 'u_i': u_i, 'X_b': X_b, 'u_b': u_b, 'X_f': X_f}

# %%

loss = model, input_dict = npde.main.setup(NN_parameters, NPDE_parameters,
                                           PDE_parameters, training_data,