Example #1
    def __call__(self, design_vars, model=None):
        """
        Generate cases.

        Parameters
        ----------
        design_vars : dict
            Dictionary of design variables for which to generate values.

        model : Group
            The model containing the design variables (not used).

        Yields
        ------
        list
            List of (name, value) tuples for the design variables.
        """
        if self._seed is not None:
            np.random.seed(self._seed)

        size = sum(meta['size'] for meta in design_vars.values())

        if self._samples is None:
            self._samples = size

        # generate design
        doe = pyDOE2.lhs(size, samples=self._samples,
                         criterion=self._criterion,
                         iterations=self._iterations,
                         random_state=self._seed)

        # yield desvar values for doe samples
        for row in doe:
            retval = []
            col = 0
            for name, meta in design_vars.items():
                size = meta['size']
                val = np.empty(size)
                for k in range(size):
                    sample = row[col + k]

                    lower = meta['lower']
                    if isinstance(lower, np.ndarray):
                        lower = lower[k]

                    upper = meta['upper']
                    if isinstance(upper, np.ndarray):
                        upper = upper[k]

                    val[k] = lower + sample * (upper - lower)

                retval.append((name, val))
                col += size

            yield retval
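A minimal standalone sketch of the same pattern, for illustration only (the variable names and bounds below are hypothetical): each column of the unit-hypercube design is mapped onto its variable's range via lower + sample * (upper - lower).

import numpy as np
import pyDOE2

bounds = {'x': (0.0, 10.0), 'y': (-1.0, 1.0)}  # hypothetical design variables
doe = pyDOE2.lhs(len(bounds), samples=5, random_state=0)

for row in doe:
    # scale each unit-interval sample onto its variable's [lower, upper] range
    point = [(name, lo + s * (hi - lo))
             for s, (name, (lo, hi)) in zip(row, bounds.items())]
    print(point)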
Example #2
File: lhs.py Project: thesadman/smt
    def _ese(self, dim, nt):
        # Parameters of maximinESE procedure
        P0 = lhs(dim, nt, criterion=None)
        J = 20
        outer_loop = min(int(1.5 * dim), 30)
        inner_loop = min(20 * dim, 100)

        D0 = pdist(P0)
        R0 = np.corrcoef(P0)
        corr0 = np.max(np.abs(R0[R0 != 1]))
        phip0 = self._PhiP(P0)

        P, historic = self._maximinESE(P0,
                                       outer_loop=outer_loop,
                                       inner_loop=inner_loop,
                                       J=J,
                                       tol=1e-3,
                                       p=10,
                                       return_hist=True)
        return P
Example #3
    def select_training_points(self, Nsamples=40, method="LH"):
        """Select training points from the chain to train the GPs.

        Note: this method does not use the "scale" parameter.
        
        Args:
            Nsamples (int): number of samples to use; default is 40
            method (string): keyword for selecting different ways of
                obtaining training points. Currently unused.
        
        """
        #Create LH training samples
        x = pyDOE2.lhs(len(self.chain_cov),
                       samples=Nsamples,
                       criterion="center",
                       iterations=5)

        #Transform them correctly
        x -= 0.5  #center the training points
        s = self.scale
        w = self.eigenvalues
        R = self.rotation_matrix

        #Snap the training points to the MCMC chain
        samples = np.dot(s * x[:] * np.sqrt(w), R.T)[:] + self.chain_means
        self.unrotated_samples = x
        self.unsnapped_samples = samples
        cov = self.chain_cov

        def sqdists(chain, s, cov):
            X = chain[:] - s
            r = np.linalg.solve(cov, X.T).T
            d = np.sum(X * r, axis=1)
            return d
        indices = np.array([np.argmin(sqdists(self.chain, s, cov)) \
                            for s in samples])

        #Include the max likelihood point
        best_ind = np.argmax(self.lnlikes)
        self.training_inds = np.append(indices, best_ind)
        return
Example #4
def lhs_of_variables(variables_dict,
                     number_of_samples,
                     method='m',
                     random_seed=None):
    # generate a DataFrame of values to run for each variable in a Latin hypercube experiment
    num_factors = len(
        variables_dict)  # get the number of factors for the experiment

    design = pd.DataFrame(pyDOE2.lhs(num_factors,
                                     samples=number_of_samples,
                                     criterion=method,
                                     random_state=random_seed),
                          columns=variables_dict.keys())

    for var in variables_dict:
        max_var_value = variables_dict[var][1]
        min_var_value = variables_dict[var][0]
        design[var] = design[var].apply(
            lambda x: (max_var_value - min_var_value) * x + min_var_value)

    return design
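A usage sketch for the function above (assuming pyDOE2 and pandas are imported as in the snippet; the variable bounds are illustrative):

variables = {'temperature': (20.0, 80.0), 'pressure': (1.0, 5.0)}
design = lhs_of_variables(variables, number_of_samples=10, random_seed=42)
print(design.head())  # one column per variable, scaled into each [min, max] range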
Example #5
File: lhs.py Project: vishalbelsare/smt
    def _ese(self, dim, nt, fixed_index=[], P0=[]):
        """
        Parameters
        ----------

        fixed_index : list
            When running an "ese" optimization, we can fix the indices of
            the points that we do not want to modify.

        """
        # Parameters of maximinESE procedure
        if len(fixed_index) == 0:
            P0 = lhs(dim, nt, criterion=None, random_state=self.random_state)
        else:
            # P0 is supplied by the caller; only reset the random state
            self.random_state = np.random.RandomState()
        J = 20
        outer_loop = min(int(1.5 * dim), 30)
        inner_loop = min(20 * dim, 100)

        D0 = pdist(P0)
        R0 = np.corrcoef(P0)
        corr0 = np.max(np.abs(R0[R0 != 1]))
        phip0 = self._PhiP(P0)

        P, historic = self._maximinESE(
            P0,
            outer_loop=outer_loop,
            inner_loop=inner_loop,
            J=J,
            tol=1e-3,
            p=10,
            return_hist=True,
            fixed_index=fixed_index,
        )
        return P
Example #6
def latin_hc(filename, num_evals):
    num_params = 5
    DataDir = "../Data/"

    # galsim parameters
    para1 = np.linspace(1e4, 1e5, num_evals)  # Flux
    para2 = np.linspace(0.1, 1., num_evals)  # Radius
    para3 = np.linspace(-0.5, 0.5, num_evals)  # g1
    para4 = np.linspace(-0.5, 0.5, num_evals)  # g2
    para5 = np.linspace(0.2, 0.4, num_evals)  # psf fwhm

    AllPara = np.vstack([para1, para2, para3, para4, para5])
    # AllLabels = [r'Flux', r'Radius', r'Shear g1', r'Shear g2', r'PSF fwhm']

    # latin hypercube
    lhd = pyDOE.lhs(AllPara.shape[0], samples=num_evals, criterion=None)  # criterion options: 'c', 'cm', 'corr', 'm'
    idx = (lhd * num_evals).astype(int)
    AllCombinations = np.zeros((num_evals, AllPara.shape[0]))
    for i in range(AllPara.shape[0]):
        AllCombinations[:, i] = AllPara[i][idx[:, i]]
    # Delete row when g1**2 + g2**2 > 1
    del_rows = np.where(AllCombinations[:, 2]**2+AllCombinations[:, 3]**2 > 1.)[0]
    AllCombinations = np.delete(AllCombinations, del_rows, axis=0)
    np.savetxt(DataDir+filename, AllCombinations)
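The snapping step is worth isolating: because lhs returns values in (0, 1), multiplying by the grid size and truncating to int turns each coordinate into an index into the matching linspace. A self-contained sketch (the grid sizes below are made up):

import numpy as np
import pyDOE

grid = np.vstack([np.linspace(0., 1., 8),     # levels for parameter 1
                  np.linspace(-5., 5., 8)])   # levels for parameter 2
lhd = pyDOE.lhs(grid.shape[0], samples=8)
idx = (lhd * 8).astype(int)                   # one grid index per sample and parameter
combos = np.stack([grid[i][idx[:, i]] for i in range(grid.shape[0])], axis=1)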
Example #7
    def _compute(self, nt):
        """
        Implemented by sampling methods to compute the requested number of sampling points.

        The number of dimensions (nx) is determined based on `xlimits.shape[0]`.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the unit hypercube.
        """
        xlimits = self.options["xlimits"]
        nx = xlimits.shape[0]

        if isinstance(self.options["random_state"], np.random.RandomState):
            self.random_state = self.options["random_state"]
        elif isinstance(self.options["random_state"], int):
            self.random_state = np.random.RandomState(
                self.options["random_state"])
        else:
            self.random_state = np.random.RandomState()

        if self.options["criterion"] != "ese":
            return lhs(
                nx,
                samples=nt,
                criterion=self.options["criterion"],
                random_state=self.random_state,
            )
        else:  # self.options["criterion"] == "ese"
            return self._ese(nx, nt)
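For context, _compute is normally reached through smt's public sampling API; a usage sketch along the lines of the smt documentation (assuming a recent smt release):

import numpy as np
from smt.sampling_methods import LHS

xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])  # one [lower, upper] row per dimension
sampling = LHS(xlimits=xlimits, criterion="ese", random_state=42)
x = sampling(50)  # ndarray of shape (50, 2)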
Example #8
    parser.add_argument("-gamma_lo", help="Lower bound on gamma", default=1)
    parser.add_argument("-gamma_hi", help="Upper bound on gamma", default=10)

    args = parser.parse_args()
    outname = args.outname
    ntrain, npts = args.ntrain, args.npts
    rhos, rt = args.rhos, args.rt
    cosmo = args.cosmo
    rs_lo, rs_hi = args.rs_by_rt_lo, args.rs_by_rt_hi
    alpha_lo, alpha_hi = args.alpha_lo, args.alpha_hi
    beta_lo, beta_hi = args.beta_lo, args.beta_hi
    gamma_lo, gamma_hi = args.gamma_lo, args.gamma_hi

    r_by_rt = np.logspace(-2, np.log10(2), npts)
    ndim = 4
    design = lhs(ndim, ntrain, criterion='maximin')

    rs_arr = rt * (design[:, 0] * (rs_hi - rs_lo) + rs_lo)  # rs = rt * (rs/rt sample)
    alpha_arr = design[:, 1] * (alpha_hi - alpha_lo) + alpha_lo
    beta_arr = design[:, 2] * (beta_hi - beta_lo) + beta_lo
    gamma_arr = design[:, 3] * (gamma_hi - gamma_lo) + gamma_lo

    cosmo = cosmology.setCosmology(cosmo)

    start = time()
    results = np.zeros((ntrain, ndim + npts)).astype('f4')
    gen = zip(range(ntrain), rs_arr, alpha_arr, beta_arr, gamma_arr)
    for i, rs, alpha, beta, gamma in gen:
        if np.mod(i, 100) == 0:
            print("...working on i = {0}".format(i))
Example #9
                         eval_limit=5,
                         cache_file=cache_file,
                         cache_dir='test')
    driver = OptimizationDriver(init_problem, **driver_config)
    n_dim = 5

    ### Sampling Example

    ## Parametric sweep
    levels = np.array([1, 1, 4, 1, 1])
    design = pyDOE.fullfact(levels)
    levels[levels == 1] = 2  # avoid dividing by zero when scaling single-level factors
    ff_scaled = design / (levels - 1)

    ## Latin Hypercube
    lhs_scaled = pyDOE.lhs(n_dim, criterion='center', samples=12)

    ## Execute Candidates
    num_evals = driver.sample(ff_scaled,
                              design_name='test_s',
                              cache_file=cache_file)
    num_evals = driver.parallel_sample(lhs_scaled,
                                       design_name='test_p',
                                       cache_file=cache_file)

    ### Optimization Example

    ## Show humpday optimizers
    # for i, f in humpday.OPTIMIZERS:
    #     print(i, f.__name__)
Example #10
    def execute_ga(self,
                   x0,
                   vlb,
                   vub,
                   vob,
                   bits,
                   pop_size,
                   max_gen,
                   random_state,
                   Pm=None,
                   Pc=0.5):
        """
        Perform the genetic algorithm.

        Parameters
        ----------
        x0 : ndarray
            Initial design values.
        vlb : ndarray
            Lower bounds array.
        vub : ndarray
            Upper bounds array. This includes over-allocation so that every point falls on an
            integer value.
        vob : ndarray
            Outer bounds array. This is purely for bounds check.
        bits : ndarray
            Number of bits to encode the design space for each element of the design vector.
        pop_size : int
            Number of points in the population.
        max_gen : int
            Number of generations to run the GA.
        random_state : np.random.RandomState, int
            Random state (or seed-number) which controls the seed and random draws.
        Pm : float or None
            Mutation rate.
        Pc : float
            Crossover rate.

        Returns
        -------
        ndarray
            Best design point.
        float
            Objective value at best design point.
        int
            Number of successful function evaluations.
        """
        comm = self.comm
        nobj = self.nobj
        self.lchrom = int(np.sum(bits))

        if nobj > 1:
            xopt = []
            fopt = []

            # Needs to be divisible by number of objectives because of tournament selection
            # strategy.
            if np.mod(pop_size, nobj) > 0:
                pop_size += nobj - np.mod(pop_size, nobj)
        else:
            xopt = copy.deepcopy(vlb)
            fopt = np.inf

            # Needs to be divisible by two because tournament selection pits one half of the
            # population against the other half.
            if np.mod(pop_size, 2) == 1:
                pop_size += 1

        self.npop = int(pop_size)
        fitness = np.zeros((self.npop, nobj))

        # If mutation rate is not provided as input
        if Pm is None:
            Pm = (self.lchrom + 1.0) / (2.0 * pop_size * np.sum(bits))
        elite = self.elite

        new_gen = np.round(
            lhs(self.lchrom,
                self.npop,
                criterion='center',
                random_state=random_state))
        new_gen[0] = self.encode(x0, vlb, vub, bits)

        # Main Loop
        nfit = 0
        for generation in range(max_gen + 1):
            old_gen = copy.deepcopy(new_gen)
            x_pop = self.decode(old_gen, vlb, vub, bits)

            # Evaluate fitness of points in this generation.
            if comm is not None:
                # Parallel

                # Since GA is random, ranks generate different new populations, so just take one
                # and use it on all.
                x_pop = comm.bcast(x_pop, root=0)

                cases = [((item, ii), None) for ii, item in enumerate(x_pop)
                         if np.all(item - vob <= 0)]

                # Pad the cases with some dummy cases to make the cases divisible amongst the procs.
                # TODO: Add a load balancing option to this driver.
                extra = len(cases) % comm.size
                if extra > 0:
                    for j in range(comm.size - extra):
                        cases.append(cases[-1])

                results = concurrent_eval(self.objfun,
                                          cases,
                                          comm,
                                          allgather=True,
                                          model_mpi=self.model_mpi)

                fitness[:] = np.inf
                for result in results:
                    returns, traceback = result

                    if returns:
                        val, success, ii = returns
                        if success:
                            fitness[ii, :] = val
                            nfit += 1

                    else:
                        # Print the traceback if it fails
                        print('A case failed:')
                        print(traceback)

            else:
                # Serial
                for ii in range(self.npop):
                    x = x_pop[ii]

                    if np.any(x - vob > 0):
                        # Exceeded bounds for integer variables that are over-allocated.
                        success = False
                    else:
                        fitness[ii, :], success, _ = self.objfun(x, 0)

                    if success:
                        nfit += 1
                    else:
                        fitness[ii, :] = np.inf

            # Find Pareto front.
            if nobj > 1:
                xopt, fopt = self.eval_pareto(x_pop, fitness, xopt, fopt)

            # Find best objective.
            else:
                # Elitism means replace worst performing point with best from
                # previous generation.
                if elite and generation > 0:
                    max_index = np.argmax(fitness[:, 0])
                    old_gen[max_index] = min_gen
                    x_pop[max_index] = min_x
                    fitness[max_index, 0] = min_fit

                # Find best performing point in this generation.
                min_fit = np.min(fitness)
                min_index = np.argmin(fitness)
                min_gen = old_gen[min_index]
                min_x = x_pop[min_index]

                if min_fit < fopt:
                    fopt = min_fit
                    xopt = min_x

            # Evolve new generation.

            if nobj > 1:
                new_gen, new_obj = self.tournament_multi_obj(old_gen, fitness)
            else:
                new_gen = self.tournament(old_gen, fitness[:, 0])

            new_gen = self.crossover(new_gen, Pc)
            new_gen = self.mutate(new_gen, Pm)

        return xopt, fopt, nfit
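The population initialization above is a compact trick: a centered LHS over [0, 1]^lchrom, rounded, yields a well-spread random bit population. In isolation (the chromosome length and population size are arbitrary):

import numpy as np
from pyDOE2 import lhs

lchrom, npop = 12, 8
pop = np.round(lhs(lchrom, npop, criterion='center', random_state=1))
# pop is an (npop, lchrom) array of 0s and 1s; centered LHS balances each column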
Example #11
def wrapper():
    return pydoe.lhs(self.dim, self.num_pts, iterations=1)
Example #12
    def run_initial_values_benchmark(
            self, log_file_name='sellar/results/log_file_initial_values.txt'):
        if path.exists(log_file_name):
            remove(log_file_name)
        s = '################### Running robustness to initial values test ################### \n'

        logfile = open(log_file_name, 'a+')
        logfile.writelines(s)
        logfile.close()
        print(s)

        test_cases = [('SLSQP', 'full_analytic', False),
                      ('SLSQP', 'semi_analytic_fd', False),
                      ('SLSQP', 'monolythic_fd', True),
                      ('COBYLA', 'derivative_free', False)]
        for test_case in test_cases:
            optimizer = test_case[0]
            derivative_method = test_case[1]
            blackbox = test_case[2]
            prob_type = 'MDO'

            # Number of samples
            N = 50
            res_num_compute = {'MDF': [], 'IDF': [], 'HYBRID': [], 'NVH': []}

            variables = {'x': (0., 10.), 'z1': (-10.0, 10.), 'z2': (0., 10.)}

            # DOE
            doe = lhs(len(variables.keys()), samples=N, criterion='center')

            initial_values = {'x': (0., 10.), 'z': np.zeros(2)}
            # Perform an analysis for each sample
            for sample in doe:
                lower_bound = variables['x'][0]
                upper_bound = variables['x'][1]
                initial_values['x'] = (upper_bound -
                                       lower_bound) * sample[0] + lower_bound

                lower_bound = variables['z1'][0]
                upper_bound = variables['z1'][1]
                z1 = (upper_bound - lower_bound) * sample[1] + lower_bound

                lower_bound = variables['z2'][0]
                upper_bound = variables['z2'][1]
                z2 = (upper_bound - lower_bound) * sample[2] + lower_bound

                initial_values['z'] = np.array([z1, z2])

                prob_list = [
                    MDFProblem(name='MDF',
                               optimizer=optimizer,
                               derivative_method=derivative_method,
                               blackbox=blackbox,
                               log_file=log_file_name,
                               print=False),
                    IDFProblem(name='IDF',
                               optimizer=optimizer,
                               derivative_method=derivative_method,
                               blackbox=blackbox,
                               log_file=log_file_name,
                               print=False),
                    HybridProblem(name='HYBRID',
                                  optimizer=optimizer,
                                  derivative_method=derivative_method,
                                  blackbox=blackbox,
                                  log_file=log_file_name,
                                  print=False),
                    NVHProblem(name='NVH',
                               optimizer=optimizer,
                               derivative_method=derivative_method,
                               blackbox=blackbox,
                               log_file=log_file_name,
                               print=False)
                ]

                self.run_analysis(prob_list, initial_values=initial_values)

                for prob in prob_list:
                    if prob.post_analysis_results['success']:
                        res_num_compute[prob.name].append(
                            prob.post_analysis_results['num_compute'])

            # Result analysis
            for i, (key, value) in enumerate(res_num_compute.items()):
                s = '> ---------- Running ' + prob_type + ' using ' + key + ' formulation and ' + \
                    optimizer + ' optimizer with ' + derivative_method + ' ------------ \n'
                max_num_compute = max(value)
                min_num_compute = min(value)
                mean_num_compute = np.mean(value)
                median_num_compute = np.median(value)
                percentage_of_success = len(value) / N * 100.
                res = 'Max number of evaluations : ' + str(max_num_compute) + '\n' \
                      'Min number of evaluations : ' + str(min_num_compute) + '\n' \
                      'Mean number of evaluations : ' + str(mean_num_compute) + '\n' \
                      'Median number of evaluations : ' + str(median_num_compute) + '\n' \
                      'Percentage of success : ' + str(percentage_of_success) + '\n'
                s += res
                logfile = open(log_file_name, 'a+')
                logfile.writelines(s)
                logfile.close()
                print(s)
Example #13
    def __data_inverse(self, data_row, num_samples, sampling_method):
        """Generates a neighborhood around a prediction.

        For numerical features, perturb them by sampling from a Normal(0,1) and
        doing the inverse operation of mean-centering and scaling, according to
        the means and stds in the training data. For categorical features,
        perturb by sampling according to the training distribution, and making
        a binary feature that is 1 when the value is the same as the instance
        being explained.

        Args:
            data_row: 1d numpy array, corresponding to a row
            num_samples: size of the neighborhood to learn the linear model
            sampling_method: 'gaussian' or 'lhs'

        Returns:
            A tuple (data, inverse), where:
                data: dense num_samples * K matrix, where categorical features
                are encoded with either 0 (not equal to the corresponding value
                in data_row) or 1. The first row is the original instance.
                inverse: same as data, except the categorical features are not
                binary, but categorical (as the original data)
        """
        is_sparse = sp.sparse.issparse(data_row)
        if is_sparse:
            num_cols = data_row.shape[1]
            data = sp.sparse.csr_matrix((num_samples, num_cols),
                                        dtype=data_row.dtype)
        else:
            num_cols = data_row.shape[0]
            data = np.zeros((num_samples, num_cols))
        categorical_features = range(num_cols)
        if self.discretizer is None:
            instance_sample = data_row
            scale = self.scaler.scale_
            mean = self.scaler.mean_
            if is_sparse:
                # Perturb only the non-zero values
                non_zero_indexes = data_row.nonzero()[1]
                num_cols = len(non_zero_indexes)
                instance_sample = data_row[:, non_zero_indexes]
                scale = scale[non_zero_indexes]
                mean = mean[non_zero_indexes]

            if sampling_method == 'gaussian':
                data = self.random_state.normal(
                    0, 1,
                    num_samples * num_cols).reshape(num_samples, num_cols)
                data = np.array(data)
            elif sampling_method == 'lhs':
                data = lhs(num_cols,
                           samples=num_samples).reshape(num_samples, num_cols)
                means = np.zeros(num_cols)
                stdvs = np.array([1] * num_cols)
                for i in range(num_cols):
                    data[:, i] = norm(loc=means[i],
                                      scale=stdvs[i]).ppf(data[:, i])
                data = np.array(data)
            else:
                warnings.warn('Invalid input for sampling_method. '
                              'Defaulting to Gaussian sampling.',
                              UserWarning)
                data = self.random_state.normal(
                    0, 1,
                    num_samples * num_cols).reshape(num_samples, num_cols)
                data = np.array(data)

            if self.sample_around_instance:
                data = data * scale + instance_sample
            else:
                data = data * scale + mean
            if is_sparse:
                if num_cols == 0:
                    data = sp.sparse.csr_matrix(
                        (num_samples, data_row.shape[1]), dtype=data_row.dtype)
                else:
                    indexes = np.tile(non_zero_indexes, num_samples)
                    indptr = np.array(
                        range(0,
                              len(non_zero_indexes) * (num_samples + 1),
                              len(non_zero_indexes)))
                    data_1d_shape = data.shape[0] * data.shape[1]
                    data_1d = data.reshape(data_1d_shape)
                    data = sp.sparse.csr_matrix(
                        (data_1d, indexes, indptr),
                        shape=(num_samples, data_row.shape[1]))
            categorical_features = self.categorical_features
            first_row = data_row
        else:
            first_row = self.discretizer.discretize(data_row)
        data[0] = data_row.copy()
        inverse = data.copy()
        for column in categorical_features:
            values = self.feature_values[column]
            freqs = self.feature_frequencies[column]
            inverse_column = self.random_state.choice(values,
                                                      size=num_samples,
                                                      replace=True,
                                                      p=freqs)
            binary_column = (inverse_column == first_row[column]).astype(int)
            binary_column[0] = 1
            inverse_column[0] = data[0, column]
            data[:, column] = binary_column
            inverse[:, column] = inverse_column
        if self.discretizer is not None:
            inverse[1:] = self.discretizer.undiscretize(inverse[1:])
        inverse[0] = data_row
        return data, inverse
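The 'lhs' branch above is inverse-transform sampling: uniform LHS scores are pushed through the standard normal percent-point function, so the stratification carries over to Gaussian space. Reduced to its core (a sketch, assuming lhs comes from pyDOE2 as in the other examples):

from pyDOE2 import lhs
from scipy.stats import norm

num_samples, num_cols = 100, 3
data = lhs(num_cols, samples=num_samples)
data = norm(loc=0, scale=1).ppf(data)  # stratified, column-wise draws from N(0, 1)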
Example #14
        result.write(templ.render(x=x, z=z))
    fh.write('%f %f %s\n' % (x, z, 'x_%.3f_z_%.3f' % (x, z)))
fh.close()

fh = open('nonapod_inputs_random_500/input_list', 'w')
for i in range(500):
    x = random.uniform(min_x, max_x)
    z = random.uniform(min_z, max_z)
    with open('nonapod_inputs_random_500/x_%.3f_z_%.3f' % (x, z),
              'w') as result:
        result.write(templ.render(x=x, z=z))
    fh.write('%f %f %s\n' % (x, z, 'x_%.3f_z_%.3f' % (x, z)))
fh.close()

# Lastly, the latin hypercube sampling approach:
sample_space_50 = lhs(2, samples=50)
sample_space_500 = lhs(2, samples=500)

# Transform those numbers in [0,1] to values in our space of interest
for arr in [sample_space_50, sample_space_500]:
    arr[:, 0] = arr[:, 0] * (max_x - min_x) + min_x
    arr[:, 1] = arr[:, 1] * (max_z - min_z) + min_z

fh = open('nonapod_inputs_latin_50/input_list', 'w')
for x, z in sample_space_50:
    with open('nonapod_inputs_latin_50/x_%.3f_z_%.3f' % (x, z), 'w') as result:
        result.write(templ.render(x=x, z=z))
    fh.write('%f %f %s\n' % (x, z, 'x_%.3f_z_%.3f' % (x, z)))
fh.close()

fh = open('nonapod_inputs_latin_500/input_list', 'w')
Example #15
    def checkmethods(self):
        pathfile_bladegen = 'D:\\Program Files\\ANSYS Inc\\v180\\aisol\\BladeModeler\\BladeGen'
        pathfile_cfturbo = 'C:\\Program Files\\CFturbo 10.3\\cfturbo.exe'
        pathfile_workbench = 'D:\\Program Files\\ANSYS Inc\\v180\\Framework\\bin\\Win64\\RunWB2.exe'
        pathfile_cfxsolve = 'D:\\Program Files\\ANSYS Inc\\v180\\CFX\\bin\\cfx5solve.exe'
        pathfile_cfxmondata = 'D:\\Program Files\\ANSYS Inc\\v180\\CFX\\bin\\cfx5mondata.exe'
        pathfile_cfxpost = 'D:\\Program Files\\ANSYS Inc\\v180\\CFX\\bin\\cfx5post.exe'

        current_path = os.getcwd()
        filepath_results = (current_path + '\\b_results')
        if not os.path.isdir(filepath_results):
            os.mkdir(filepath_results)
        bladegen_bat = 'B1_bat_bladegen.txt'
        cfturbo_bat = 'B1_bat_cfturbo.txt'
        workbench_bat = 'B2_bat_workbench.txt'
        cfxsolve_bat = 'B3_bat_cfxsolve.txt'
        cfxmondata_bat = 'B4_bat_cfxmondata.txt'
        cfxpost_bat = 'B5_bat_cfxpost.txt'
        if os.path.isfile(bladegen_bat):
            os.remove(bladegen_bat)
        if os.path.isfile(cfturbo_bat):
            os.remove(cfturbo_bat)
        if os.path.isfile(workbench_bat):
            os.remove(workbench_bat)
        if os.path.isfile(cfxsolve_bat):
            os.remove(cfxsolve_bat)
        if os.path.isfile(cfxmondata_bat):
            os.remove(cfxmondata_bat)
        if os.path.isfile(cfxpost_bat):
            os.remove(cfxpost_bat)

        # write bladegen basic bat file
        bladegen_bat_base = 'A1_bat_bladegen.txt'
        fp_bladegen01_bat = open(current_path + '\\' +
                                 bladegen_bat_base).readlines()
        fp_bladegen02_bat = open(bladegen_bat, 'w')
        try:
            for eachline in fp_bladegen01_bat:
                fp_bladegen02_bat.write(
                    eachline.replace('bladegen_set_path', pathfile_bladegen))
        finally:
            fp_bladegen02_bat.close()

        # write cfturbo basic bat file
        cfturbo_bat_base = 'A1_bat_cfturbo.txt'
        fp_cfturbo01_bat = open(current_path + '\\' +
                                cfturbo_bat_base).readlines()
        fp_cfturbo02_bat = open(cfturbo_bat, 'w')
        try:
            for eachline in fp_cfturbo01_bat:
                fp_cfturbo02_bat.write(
                    eachline.replace('cfturbo_set_path', pathfile_cfturbo))
        finally:
            fp_cfturbo02_bat.close()

        # write workbench basic bat file
        workbench_bat_base = 'A2_bat_workbench.txt'
        fp_workbench01_bat = open(current_path + '\\' +
                                  workbench_bat_base).readlines()
        fp_workbench02_bat = open(workbench_bat, 'w')
        try:
            for eachline in fp_workbench01_bat:
                fp_workbench02_bat.write(
                    eachline.replace('workbench_set_path', pathfile_workbench))
        finally:
            fp_workbench02_bat.close()

        # write cfxsolve basic bat file
        core_cfx_num = self.core.currentText()
        cfxsolve_bat_base = 'A3_bat_cfxsolve.txt'
        fp_cfxsolve01_bat = open(current_path + '\\' +
                                 cfxsolve_bat_base).readlines()
        fp_cfxsolve02_bat = open(cfxsolve_bat, 'w')
        try:
            for eachline in fp_cfxsolve01_bat:
                fp_cfxsolve02_bat.write(eachline.replace('cfxsolve_set_path', pathfile_cfxsolve) \
                                        .replace('corenum', core_cfx_num))
        finally:
            fp_cfxsolve02_bat.close()

        # write cfxmondata basic bat file
        cfxmondata_bat_base = 'A4_bat_cfxmondata.txt'
        fp_cfxmondata01_bat = open(current_path + '\\' +
                                   cfxmondata_bat_base).readlines()
        fp_cfxmondata02_bat = open(cfxmondata_bat, 'w')
        try:
            for eachline in fp_cfxmondata01_bat:
                fp_cfxmondata02_bat.write(
                    eachline.replace('cfxmondata_set_path',
                                     pathfile_cfxmondata))
        finally:
            fp_cfxmondata02_bat.close()

        # write cfxpost basic bat file
        cfxpost_bat_base = 'A5_bat_cfxpost.txt'
        fp_cfxpost01_bat = open(current_path + '\\' +
                                cfxpost_bat_base).readlines()
        fp_cfxpost02_bat = open(cfxpost_bat, 'w')
        try:
            for eachline in fp_cfxpost01_bat:
                fp_cfxpost02_bat.write(
                    eachline.replace('cfxpost_set_path', pathfile_cfxpost))
        finally:
            fp_cfxpost02_bat.close()

        # write all results
        results_file_path = 'pump_performance_data.xlsx'
        columns_head = [
            'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'efficiency', 'head',
            'power'
        ]

        if self.cb1.currentText() == 'Des':
            self.cb2.setEnabled(False)
            self.cb3.setEnabled(False)
            self.cb4.setEnabled(False)
            x = np.array([[60.5, 45.8, 49.7, 54.8, 60.7, 60.0, 65.8]])
            pump_performance = objective_pump(x)
            all_data = np.hstack((x, pump_performance))
            write_results_excel(all_data, results_file_path, columns_head)
        elif self.cb1.currentText() == 'Opt':
            self.cb2.showPopup()
            self.cb3.setEnabled(False)
            self.cb4.setEnabled(False)
            if self.cb2.currentText() == 'DOE':
                self.cb3.showPopup()
                self.cb4.setEnabled(False)
                self.cb3.setEnabled(True)
                if self.cb3.currentText() == 'New':
                    x_initial = lhs(7, samples=5, criterion='center')
                    ub_array = np.array(
                        [70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 68.0])
                    lb_array = np.array(
                        [50.0, 40.0, 40.0, 40.0, 40.0, 40.0, 58.0])
                    x = np.multiply(x_initial,
                                    (ub_array - lb_array)) + lb_array
                    filename_new_doe = 'DOE_new_design.xlsx'
                    if not os.path.isfile(filename_new_doe):
                        write_results_excel(
                            x, filename_new_doe,
                            ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
                    else:
                        os.remove(filename_new_doe)
                        write_results_excel(
                            x, filename_new_doe,
                            ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
                    pump_performance = objective_pump(x)
                    all_data = np.hstack((x, pump_performance))
                    write_results_excel(all_data, results_file_path,
                                        columns_head)
                elif self.cb3.currentText() == 'Existed':
                    filename_existed_doe = 'DOE_existed_design.xlsx'
                    df = pd.DataFrame(pd.read_excel(filename_existed_doe))
                    x = df.to_numpy()[..., 1:]
                    pump_performance = objective_pump(x)
                    all_data = np.hstack((x, pump_performance))
                    write_results_excel(all_data, results_file_path,
                                        columns_head)
            elif self.cb2.currentText() == 'AI':
                self.cb4.showPopup()
                self.cb3.setEnabled(False)
                self.cb4.setEnabled(True)
                f_objective = lambda x: objective_pump(x)
                numpop = 20
                numiter = 500
                functiondim = 2
                lb = -32 * np.ones([1, functiondim])
                ub = 32 * np.ones([1, functiondim])
                echo = 'on'
                if self.cb4.currentText() == 'PSO':
                    from pso_algorithm import pso
                    xgbest, fgbest, fbest = pso(f_objective, numpop,
                                                functiondim, lb, ub, numiter,
                                                echo)
                '''
                elif self.cb4.currentText() == 'GSA':
                    from gsa_algorithm import gsa
                    [xgbest,fgbest,fbest]=gsa(f_objective,numpop,functiondim,lb,ub,numiter,echo)
                elif self.cb4.currentText() == 'GA':
                    from ga_algorithm import ga
                    [xgbest,fgbest,fbest]=gsa(f_objective,numpop,functiondim,lb,ub,numiter,echo)
                elif self.cb4.currentText() == 'BA':
                    from ba_algorithm import ba
                    [xgbest,fgbest,fbest]=ba(f_objective,numpop,functiondim,lb,ub,numiter,echo)
                elif self.cb4.currentText() == 'ABC':
                    from abc_algorithm import abc
                    [xgbest,fgbest,fbest]=abc(f_objective,numpop,functiondim,lb,ub,numiter,echo)
                '''
                all_data = np.hstack((xgbest, fgbest))
                filename_global_best = 'IA_global_best.xlsx'
                write_results_excel(all_data, filename_global_best,
                                    ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'y'])
                filename_iteration_best = 'IA_iteration_best.xlsx'
                write_results_excel(fbest, filename_iteration_best, ['y'])
Example #16
    def run(
        self,
        seed,
        n,
        dims,
        sample_type,
        scale,
        scale_factor,
        outfile,
        x0,
        x1,
        n_line,
        hard_bounds,
    ):
        np.random.seed(seed)
        n_samples = n
        n_dims = dims
        if sample_type == "random":
            x = np.random.random((n_samples, n_dims))
        elif sample_type == "grid":
            subdivision = int(pow(n_samples, 1 / float(n_dims)))
            temp = [np.linspace(0, 1.0, subdivision) for i in range(n_dims)]
            X = np.meshgrid(*temp)
            x = np.stack([xx.flatten() for xx in X], axis=1)
        elif sample_type == "lhs":
            x = doe.lhs(n_dims, samples=n_samples, random_state=seed)
        elif sample_type == "lhd":
            _x = doe.lhs(n_dims, samples=n_samples, random_state=seed)
            x = norm(loc=0.5, scale=0.125).ppf(_x)
        elif sample_type == "star":
            _x = doe.doe_star.star(n_dims)[0]
            x = 0.5 * (_x + 1.0)  # transform to center at 0.5 (range 0-1)
        elif sample_type == "ccf" or sample_type == "ccc" or sample_type == "cci":
            _x = np.unique(doe.ccdesign(n_dims, face=sample_type), axis=0)
            x = 0.5 * (_x + 1.0)
        else:
            raise ValueError(sample_type +
                             " is not a valid choice for sample_type!")

        scales = process_scale(scale)

        if scales is not None:
            limits = []
            do_log = []
            for scale in scales:
                limits.append((scale[0], scale[1]))
                if len(scale) < 3:
                    scale.append("linear")
                if scale[2] == "log":
                    do_log.append(True)
                else:
                    do_log.append(False)
            x = scale_samples(x, limits, do_log=do_log)

        # scale the whole box
        x = scale_factor * x

        # add x0
        if x0 is not None:
            x0 = np.atleast_2d(np.load(x0))
            if scales is not None:
                sa = scale_factor * np.array(scales)[:, :2].astype("float")
                center = np.mean(sa, axis=1)
            else:
                center = scale_factor * 0.5
            # Loop over all x0 points
            all_x = []
            for _x0 in x0:

                _x = x + _x0 - center

                # replace the first entry with x0 for the random ones
                if sample_type == "lhs" or sample_type == "lhd":
                    _x[0] = _x0
                else:  # add it for the stencil points
                    _x = np.insert(_x, 0, _x0, axis=0)

                if x1 is not None:
                    if isinstance(x1, str):
                        # load the line-endpoint file once; later x0 iterations reuse it
                        x1 = np.load(x1)
                    line_range = np.linspace(0, 1, n_line + 1,
                                             endpoint=False)[1:]
                    line_samples = _x0 + np.outer(line_range, (x1 - _x0))
                    _x = np.vstack((_x, line_samples))
                all_x.append(_x)

            x = np.vstack(all_x)

        if hard_bounds:
            if scales is None:
                x = np.clip(x, 0, 1)
            else:
                for i, dim in enumerate(scales):
                    x[:, i] = np.clip(x[:, i], dim[0], dim[1])

        print(x)

        np.save(outfile, x)
Example #17
def wrapper():
    return pydoe.lhs(self.dim, self.num_pts, iterations=1)
Example #18
def get_unit_latin_hypercube(dims, n_samples):
    return pyDOE2.lhs(n=dims, samples=n_samples, criterion='correlation')
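A usage sketch: the returned design lives in the unit hypercube, so callers typically rescale it to their own bounds afterwards (the bounds below are illustrative):

import numpy as np

X = get_unit_latin_hypercube(dims=3, n_samples=20)
lo = np.array([0.0, -1.0, 10.0])
hi = np.array([1.0, 1.0, 20.0])
X_scaled = lo + X * (hi - lo)  # broadcast each column into its [lo, hi] range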
Example #19
def main():
    # num_lhc = 100
    # num_evals = np.logspace(2, 4, num=num_lhc)
    num_lhc = 1
    num_evals = [512]
    num_params = 5
    verbose = True
    np.random.seed(7)
    set_name = 'training'

    #########################################################################
    ####### Parameters -- these should follow the following syntax ########
    # para = np.linspace(lower_lim, upper_lim, total_eval)

    for n in range(num_lhc):
        nevals = int(num_evals[n])
        # cosmology parameters - excluding tau and dark energy
        para1 = np.linspace(1e4, 1e5, nevals)  # Flux
        para2 = np.linspace(0.1, 1., nevals)  # Radius
        para3 = np.linspace(-0.5, 0.5, nevals)  # g1
        para4 = np.linspace(-0.5, 0.5, nevals)  # g2
        para5 = np.linspace(0.2, 0.4, nevals)  # psf fwhm

        # redshift parameters
        if (num_params == 7):
            para6 = np.linspace(0.5, 1.5, nevals)  # z_m
            para7 = np.linspace(0.05, 0.5, nevals)  # FWHM
        # no other known option yet - can insert other options, or read from text file
        elif (num_params > 5):
            print("unknown parameter option")

        if (num_params == 5):
            AllPara = np.vstack([para1, para2, para3, para4, para5])
            AllLabels = [
                r'Flux', r'Radius', r'Shear g1', r'Shear g2', r'PSF fwhm'
            ]
        elif (num_params == 7):
            AllPara = np.vstack(
                [para1, para2, para3, para4, para5, para6, para7])
            AllLabels = [
                r'$\tilde{\omega}_m$', r'$\tilde{\omega}_b$',
                r'$\tilde{\sigma}_8$', r'$\tilde{h}$', r'$\tilde{n}_s$',
                r'$\tilde{z}_m$', r'$\tilde{FWHM}$'
            ]

        #########################################################################
        # latin hypercube
        lhd = pyDOE.lhs(AllPara.shape[0], samples=nevals,
                        criterion=None)  # c cm corr m
        idx = (lhd * nevals).astype(int)
        AllCombinations = np.zeros((nevals, AllPara.shape[0]))
        for i in range(AllPara.shape[0]):
            AllCombinations[:, i] = AllPara[i][idx[:, i]]
        # Delete row when g1**2 + g2**2 > 1
        del_rows = np.where(
            AllCombinations[:, 2]**2 + AllCombinations[:, 3]**2 > 1.)[0]
        AllCombinations = np.delete(AllCombinations, del_rows, axis=0)
        # np.savetxt('lhc_'+str(nevals)+'_'+str(num_params)+'_'+set_name+'.txt', AllCombinations)

        # if verbose:
        # print(lhd)
        # lhd = norm(loc=0, scale=1).ppf(lhd)  # this applies to both factors here

        #
        if verbose:
            f, a = plt.subplots(AllPara.shape[0],
                                AllPara.shape[0],
                                sharex=True,
                                sharey=True)
            plt.subplots_adjust(left=None,
                                bottom=None,
                                right=None,
                                top=None,
                                wspace=None,
                                hspace=None)
            plt.rcParams.update({'font.size': 4})

        for i in range(AllPara.shape[0]):
            for j in range(i + 1):
                print(i, j)
                if (i != j):
                    a[i, j].scatter(lhd[:, i], lhd[:, j], s=1, alpha=0.7)
                    a[i, j].grid(True)
                    a[j, i].set_visible(False)

                else:
                    # a[i,i].set_title(AllLabels[i])
                    a[i, i].text(0.4, 0.4, AllLabels[i], size='x-large')
                    hist, bin_edges = np.histogram(lhd[:, i],
                                                   density=True,
                                                   bins=12)
                    # a[i,i].bar(hist)
                    a[i, i].bar(bin_edges[:-1],
                                hist / hist.max(),
                                width=0.09,
                                alpha=0.5)
                    plt.xlim(0, 1)
                    plt.ylim(0, 1)
        plt.tight_layout()
        plt.savefig('../Data/Plots/LatinSq.pdf', bbox_inches="tight", dpi=900)
Example #20
 opt_method = input(
     'Which method do you want to choose? design of experiment or intelligent algorithm(DOE/IA): '
 )
 while opt_method not in choose_opt_method:
     opt_method = input(
         'Which method do you want to choose? design of experiment or intelligent algorithm(DOE/IA): '
     )
 if opt_method == choose_opt_method[0]:
     print('DOE is chosen')
     choose_DOE = ('New', 'Existed')
     DOE_opt = input('Is the DOE New or Existed(New or Existed): ')
     while DOE_opt not in choose_DOE:
         DOE_opt = input('Is the DOE New or Existed(New or Existed): ')
     if DOE_opt == choose_DOE[0]:
         print('DOE is New')
         x_initial = lhs(7, samples=5, criterion='center')
         ub_array = np.array([70.0, 80.0, 80.0, 80.0, 80.0, 80.0, 68.0])
         lb_array = np.array([50.0, 40.0, 40.0, 40.0, 40.0, 40.0, 58.0])
         x = np.multiply(x_initial, (ub_array - lb_array)) + lb_array
         filename_new_doe = 'DOE_new_design.xlsx'
         if not os.path.isfile(filename_new_doe):
             write_results_excel(x, filename_new_doe,
                                 ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
         else:
             os.remove(filename_new_doe)
             write_results_excel(x, filename_new_doe,
                                 ['x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7'])
         pump_performance = objective_pump(x)
         all_data = np.hstack((x, pump_performance))
         write_results_excel(all_data, results_file_path, columns_head)
     else:
Example #21
                                 cache_dir=run_name + '/' + case,
                                 reconnect_cache=False,
                                 write_csv=write_to_csv)
            driver = OptimizationDriver(prob.create_problem, **driver_config)

            # More information about sampling: https://pythonhosted.org/pyDOE/index.html
            if sampling_method == 'fullfact':
                # Full factorial parametric sweep
                levels = np.array([N_levels] * prob.get_problem_dimen())
                design = pyDOE.fullfact(levels)
                levels[levels == 1] = 2
                samples = design / (levels - 1)
            elif sampling_method == 'lhs':
                # Latin Hypercube Sampling of design space
                samples = pyDOE.lhs(prob.get_problem_dimen(),
                                    criterion='cm',
                                    samples=N_samples)

            # Saves sampling values for post-process analysis
            if save_samples:
                with open(driver.options['cache_dir'] + '/' + case + '_' +
                          sampling_method + '_samples.csv',
                          'w',
                          newline='') as f:
                    csv_writer = csv.writer(f)
                    csv_writer.writerows(samples)

            # Breaking sampling into small batches to reduce impacts of memory leak
            N_samples_per_batch = len(samples) / N_smb
            for i in range(N_smb):
                start = int(i * N_samples_per_batch)
Example #22
def lhs(samples: int, attributes: int, *args, **kwargs) -> np.ndarray:
    return pyDOE2.lhs(attributes, samples, *args, **kwargs)
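A usage note: the wrapper flips pyDOE2's argument order so calls read "rows, then columns", and extra options pass straight through to pyDOE2:

X = lhs(samples=100, attributes=4, criterion='maximin')  # ndarray of shape (100, 4)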
Example #23
def generateTradeStudy(tradeStudy):
    fileName = os.getcwd() + '\\Results\\' + tradeStudy.fileName
    runHPOP = tradeStudy.runHPOP
    epoch = tradeStudy.epoch
    a = tradeStudy.a
    e = tradeStudy.e
    i = tradeStudy.i
    RAAN = tradeStudy.RAAN
    AoP = tradeStudy.AoP
    TA = tradeStudy.TA
    Cd = tradeStudy.Cd
    Cr = tradeStudy.Cr
    DragArea = tradeStudy.DragArea
    SunArea = tradeStudy.SunArea
    Mass = tradeStudy.Mass
    OrbPerCal = tradeStudy.OrbPerCal
    GaussQuad = tradeStudy.GaussQuad
    SigLvl = tradeStudy.SigLvl
    SolFlxFile = tradeStudy.SolFlxFile
    AtmDen = tradeStudy.AtmDen
    SecondOrderOblateness = tradeStudy.SecondOrderOblateness
    numberOfRuns = tradeStudy.numberOfRuns
    howToVary = tradeStudy.howToVary
    varyCols = tradeStudy.varyCols
    varyValues = tradeStudy.varyValues
    setSunAreaEqualToDragArea = tradeStudy.setSunAreaEqualToDragArea
    np.random.seed(seed=1)

    # Generate Additional Columns
    Rp = a * (1 - e)
    Ra = a * (1 + e)
    p = a * (1 - e**2)
    rs, vs = coe2rv(GM_earth * 1e-9, p, e, i * np.pi / 180, RAAN * np.pi / 180,
                    AoP * np.pi / 180, TA * np.pi / 180)
    x = rs[0]
    y = rs[1]
    z = rs[2]
    Vx = vs[0]
    Vy = vs[1]
    Vz = vs[2]
    CdAM = Cd * DragArea / Mass
    CrAM = Cr * SunArea / Mass

    # Generate Dataframe to store all of the runs
    data = [
        epoch, a, e, i, RAAN, AoP, TA, Rp, Ra, p, x, y, z, Vx, Vy, Vz, Cd, Cr,
        DragArea, SunArea, Mass, CdAM, CrAM, OrbPerCal, GaussQuad, SigLvl,
        SolFlxFile, AtmDen, SecondOrderOblateness
    ]

    columns = [
        'epoch', 'a', 'e', 'i', 'RAAN', 'AoP', 'TA', 'Rp', 'Ra', 'p', 'x', 'y',
        'z', 'Vx', 'Vy', 'Vz', 'Cd', 'Cr', 'Drag Area', 'Sun Area', 'Mass',
        'Cd*Drag Area/Mass', 'Cr*Sun Area/Mass', 'Orb Per Calc',
        'Gaussian Quad', 'Flux Sigma Level', 'SolarFluxFile', 'Density Model',
        '2nd Order Oblateness'
    ]

    df = pd.DataFrame(data=data, index=columns).T
    df[df.columns[:-3]] = df[df.columns[:-3]].astype(float)

    # Grid Search
    if howToVary.lower() == 'gridsearch':
        # Create grid search of parameters and update the Dataframe
        numOfLevels = [len(val) for val in varyValues]
        runs = fullfact(numOfLevels).astype(int)
        paramdf = pd.DataFrame()

        for ii in range(len(runs.T)):
            paramdf[varyCols[ii]] = varyValues[ii][runs[:, ii]]

        df = pd.concat([df] * len(runs), ignore_index=True)

        for col in paramdf.columns:
            df[col] = paramdf[col]

    # Latin Hypercube
    elif howToVary.lower() == 'latinhypercube':
        # Generate runs
        lhd = lhs(len(varyCols), samples=numberOfRuns)
        #     lhd = stats.norm(loc=0, scale=1).ppf(lhd) # Convert to a normal distribution
        lhd = pd.DataFrame(lhd)

        adjustEpoch = False
        if 'epoch' in varyCols:
            date1 = yydddToDatetime(varyValues[0][0])
            date2 = yydddToDatetime(varyValues[0][1])
            deltaDays = lhd[0] * (date2 - date1).days
            minDate = [varyValues[0][0] for i in range(numberOfRuns)]
            varyCols.remove('epoch')
            varyValues = varyValues[1:]
            lhd = lhd.drop(0, axis=1)
            adjustEpoch = True

        lhd.columns = varyCols

        # Replace string columns with categories
        strii = [
            ii for ii in range(len(varyValues))
            if isinstance(varyValues[ii][0], str)
        ]
        for ii in strii:
            lhd.iloc[:, ii] = pd.cut(lhd.iloc[:, ii],
                                     len(varyValues[ii]),
                                     labels=varyValues[ii])
        # Replace float columns with values in range
        varyValues = np.array(varyValues, dtype=object)
        floatii = lhd.dtypes == float
        lhsMinMax = varyValues[floatii]
        lhsMinMax = np.concatenate(lhsMinMax, axis=0).reshape(-1, 2)
        lhd.loc[:, floatii] = lhd.loc[:, floatii] * (
            lhsMinMax[:, 1] - lhsMinMax[:, 0]) + lhsMinMax[:, 0]

        indxs = [
            ii for ii in range(len(varyCols)) if varyCols[ii] in
            ['Orb Per Calc', 'Gaussian Quad', 'Flux Sigma Level']
        ]
        lhd.iloc[:, indxs] = lhd.iloc[:, indxs].round()

        # Create df
        df = pd.concat([df] * len(lhd), ignore_index=True)

        if adjustEpoch:
            df['epoch'] = [
                adjustDate(yyddd, deltaDay)
                for yyddd, deltaDay in zip(minDate, deltaDays)
            ]

        for col in lhd.columns:
            df[col] = lhd[col]

    # Perturb
    elif howToVary.lower() == 'perturb':
        # Sample from a normal distribution
        rv = stats.norm()
        rvVals = rv.rvs((numberOfRuns, len(varyCols)))

        # Create perturbation df
        pertdf = pd.DataFrame(rvVals) * varyValues
        pertdf.columns = varyCols

        # Duplicate original df by the number of runs
        df = pd.concat([df] * numberOfRuns, ignore_index=True)

        if 'epoch' in varyCols:
            df['epoch'] = [
                adjustDate(yyddd, deltaDay)
                for yyddd, deltaDay in zip(df['epoch'], pertdf['epoch'])
            ]
            varyCols.remove('epoch')
            varyValues = varyValues[1:]

        for col in varyCols:
            df[col] = df[col] + pertdf[col]

    # Update dependant values
    df = updateDf(df, runHPOP, varyCols, setSunAreaEqualToDragArea)

    # Convert all columns to floats and round to the 10th decimal, otherwise there are some numerical rounding issues.
    cols = [
        col for col in df.columns if col not in
        ['SolarFluxFile', 'Density Model', '2nd Order Oblateness']
    ]
    df[cols] = df[cols].astype(float)
    for col in cols:
        df[col] = np.round(df[col], 10)

    # Add results
    df['LT Orbits'] = np.nan
    df['LT Years'] = np.nan
    df['LT Runtime'] = np.nan
    if runHPOP:
        df['HPOP Years'] = np.nan
        df['HPOP Runtime'] = np.nan

    df.index.name = 'Run ID'
    df = df.reset_index()

    df.to_csv(fileName)  # Create a new csv to store the results

    return df
Example #24
    def run_initial_values_benchmark(self, log_file_name='ema/results/log_file_initial_values.txt'):
        if path.exists(log_file_name):
            remove(log_file_name)
        s = '################### Running robustness to initial values test ################### \n'

        logfile = open(log_file_name, 'a+')
        logfile.writelines(s)
        logfile.close()
        print(s)

        # Set to true to print detail of each sample
        print_option = False

        test_cases = [('SLSQP', 'full_analytic', False), ('SLSQP', 'semi_analytic_fd', False)]
        for test_case in test_cases:
            optimizer = test_case[0]
            derivative_method = test_case[1]
            blackbox = test_case[2]
            prob_type = 'MDO'

            # Number of samples
            N = 50
            res_num_compute = {
                'MDF': [],
                'IDF': [],
                'HYBRID': [],
                'NVH': []
            }

            # Scale to make sure that a solution exist
            k_os = 1.

            # Bounds of the variables of the DOE
            # lower_bound = 0.1
            # upper_bound = 1.

            variables = {
                'N_red': (0.1, 8.),
                'J_mot': (1e-6, 1.e-1),
                'T_em': (0.1, 100.)
            }

            # DOE
            doe = lhs(len(variables.keys()), samples=N, criterion='center')

            # Perform an analysis for each sample
            for sample in doe:
                initial_values = copy.copy(variables)
                for i, (key, value) in enumerate(initial_values.items()):
                    upper_bound = value[1]
                    lower_bound = value[0]
                    initial_values[key] = (upper_bound - lower_bound) * sample[i] + lower_bound

                prob_list = [
                    MDFProblem(name='MDF', optimizer=optimizer, derivative_method=derivative_method,
                               blackbox=blackbox, log_file=log_file_name, scale=k_os, print=print_option),
                    IDFProblem(name='IDF', optimizer=optimizer, derivative_method=derivative_method,
                               blackbox=blackbox, log_file=log_file_name, scale=k_os, print=print_option),
                    HybridProblem(name='HYBRID', optimizer=optimizer, derivative_method=derivative_method,
                                  blackbox=blackbox, log_file=log_file_name, scale=k_os, print=print_option),
                    NVHProblem(name='NVH', optimizer=optimizer, derivative_method=derivative_method,
                               blackbox=blackbox, log_file=log_file_name, scale=k_os, print=print_option)]

                self.run_analysis(prob_list, initial_values=initial_values)
                
                for prob in prob_list:
                    if prob.post_analysis_results['success']:
                        res_num_compute[prob.name].append(prob.post_analysis_results['num_compute'])
            
            
            # Result analysis
            for key, value in res_num_compute.items():
                s = '> ---------- Running ' + prob_type + ' using ' + \
                    key + ' formulation and ' + optimizer + ' optimizer with ' + \
                    derivative_method + ' at scale ' + str(k_os) + ' ------------ \n'
                try:
                    max_num_compute = max(value)
                    min_num_compute = min(value)
                    mean_num_compute = np.mean(value)
                    median_num_compute = np.median(value)
                    percentage_of_success = len(value) / N * 100.
                    res = 'Max number of evaluations : ' + str(max_num_compute) + '\n' \
                        'Min number of evaluations : ' + str(min_num_compute) + '\n' \
                        'Mean number of evaluations : ' + str(mean_num_compute) + '\n' \
                        'Median number of evaluations : ' + str(median_num_compute) + '\n' \
                        'Percentage of success : ' + str(percentage_of_success) + '\n'
                    s += res
                except ValueError:
                    # max()/min() raise ValueError on an empty list, i.e. all samples failed
                    s += 'All samples failed. \n'
                with open(log_file_name, 'a+') as logfile:
                    logfile.write(s)
                print(s)
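The bounds-mapping pattern above (a unit LHS sample scaled into each variable's range) is worth isolating; a minimal sketch using the same variables dict:

import copy
from pyDOE2 import lhs

variables = {'N_red': (0.1, 8.), 'J_mot': (1e-6, 1.e-1), 'T_em': (0.1, 100.)}

doe = lhs(len(variables), samples=3, criterion='center')
for sample in doe:
    initial_values = copy.copy(variables)
    for i, (key, (lower_bound, upper_bound)) in enumerate(variables.items()):
        initial_values[key] = (upper_bound - lower_bound) * sample[i] + lower_bound
    print(initial_values)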
Example #25
0
    def execute_ga(self, x0, vlb, vub, vob, bits, pop_size, max_gen, random_state, Pm=None, Pc=0.5):
        """
        Perform the genetic algorithm.

        Parameters
        ----------
        x0 : ndarray
            Initial design values
        vlb : ndarray
            Lower bounds array.
        vub : ndarray
            Upper bounds array. This includes over-allocation so that every point falls on an
            integer value.
        vob : ndarray
            Outer bounds array. This is purely for bounds check.
        bits : ndarray
            Number of bits to encode the design space for each element of the design vector.
        pop_size : int
            Number of points in the population.
        max_gen : int
            Number of generations to run the GA.
        random_state : np.random.RandomState, int
            Random state (or seed-number) which controls the seed and random draws.
        Pm : float or None
            Mutation rate
        Pc : float
            Crossover rate

        Returns
        -------
        ndarray
            Best design point
        float
            Objective value at best design point.
        int
            Number of successful function evaluations.
        """
        comm = self.comm
        xopt = copy.deepcopy(vlb)
        fopt = np.inf
        self.lchrom = int(np.sum(bits))

        if np.mod(pop_size, 2) == 1:
            pop_size += 1
        self.npop = int(pop_size)
        fitness = np.zeros((self.npop, ))

        # If mutation rate is not provided as input
        if Pm is None:
            Pm = (self.lchrom + 1.0) / (2.0 * pop_size * np.sum(bits))
        elite = self.elite

        new_gen = np.round(lhs(self.lchrom, self.npop, criterion='center',
                               random_state=random_state))
        new_gen[0] = self.encode(x0, vlb, vub, bits)

        # Main Loop
        nfit = 0
        for generation in range(max_gen + 1):
            old_gen = copy.deepcopy(new_gen)
            x_pop = self.decode(old_gen, vlb, vub, bits)

            # Evaluate points in this generation.
            if comm is not None:
                # Parallel

                # Since GA is random, ranks generate different new populations, so just take one
                # and use it on all.
                x_pop = comm.bcast(x_pop, root=0)

                cases = [((item, ii), None) for ii, item in enumerate(x_pop)
                         if np.all(item - vob <= 0)]

                # Pad the cases with some dummy cases to make the cases divisible amongst the procs.
                # TODO: Add a load balancing option to this driver.
                extra = len(cases) % comm.size
                if extra > 0:
                    for j in range(comm.size - extra):
                        cases.append(cases[-1])

                results = concurrent_eval(self.objfun, cases, comm, allgather=True,
                                          model_mpi=self.model_mpi)

                fitness[:] = np.inf
                for result in results:
                    returns, traceback = result

                    if returns:
                        val, success, ii = returns
                        if success:
                            fitness[ii] = val
                            nfit += 1

                    else:
                        # Print the traceback if it fails
                        print('A case failed:')
                        print(traceback)

            else:
                # Serial
                for ii in range(self.npop):
                    x = x_pop[ii]

                    if np.any(x - vob > 0):
                        # Exceeded bounds for integer variables that are over-allocated.
                        success = False
                    else:
                        fitness[ii], success, _ = self.objfun(x, 0)

                    if success:
                        nfit += 1
                    else:
                        fitness[ii] = np.inf

            # Elitism means replace worst performing point with best from previous generation.
            if elite and generation > 0:
                max_index = np.argmax(fitness)
                old_gen[max_index] = min_gen
                x_pop[max_index] = min_x
                fitness[max_index] = min_fit

            # Find best performing point in this generation.
            min_fit = np.min(fitness)
            min_index = np.argmin(fitness)
            min_gen = old_gen[min_index]
            min_x = x_pop[min_index]

            if min_fit < fopt:
                fopt = min_fit
                xopt = min_x

            # Evolve new generation.
            new_gen = self.tournament(old_gen, fitness)
            new_gen = self.crossover(new_gen, Pc)
            new_gen = self.mutate(new_gen, Pm)

        return xopt, fopt, nfit
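Since lchrom equals sum(bits), the default mutation rate above reduces to (sum(bits) + 1) / (2 * pop_size * sum(bits)); a quick numeric check with illustrative values:

import numpy as np

bits = np.array([8, 8])     # 8 bits per design variable
pop_size = 40
lchrom = int(np.sum(bits))  # 16
Pm = (lchrom + 1.0) / (2.0 * pop_size * np.sum(bits))
print(Pm)                   # 17 / 1280 = 0.01328125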
Example #26
0
    def execute_ga(self, vlb, vub, bits, pop_size, max_gen, random_state):
        """
        Perform the genetic algorithm.

        Parameters
        ----------
        vlb : ndarray
            Lower bounds array.
        vub : ndarray
            Upper bounds array.
        bits : ndarray
            Number of bits to encode the design space for each element of the design vector.
        pop_size : int
            Number of points in the population.
        max_gen : int
            Number of generations to run the GA.
        random_state : np.random.RandomState, int
             Random state (or seed-number) which controls the seed and random draws.

        Returns
        -------
        ndarray
            Best design point
        float
            Objective value at best design point.
        int
            Number of successful function evaluations.
        """
        comm = self.comm
        xopt = copy.deepcopy(vlb)
        fopt = np.inf
        self.lchrom = int(np.sum(bits))

        if np.mod(pop_size, 2) == 1:
            pop_size += 1
        self.npop = int(pop_size)
        fitness = np.zeros((self.npop, ))

        Pc = 0.5
        Pm = (self.lchrom + 1.0) / (2.0 * pop_size * np.sum(bits))
        elite = self.elite

        # TODO: seed from a user-supplied initial population
        # new_gen, lchrom = encode(x0, vlb, vub, bits)
        new_gen = np.round(
            lhs(self.lchrom,
                self.npop,
                criterion='center',
                random_state=random_state))

        # Main Loop
        nfit = 0
        for generation in range(max_gen + 1):
            old_gen = copy.deepcopy(new_gen)
            x_pop = self.decode(old_gen, vlb, vub, bits)

            # Evaluate points in this generation.
            if comm is not None:
                # Parallel

                # Since GA is random, ranks generate different new populations, so just take one
                # and use it on all.
                x_pop = comm.bcast(x_pop, root=0)

                cases = [((item, ii), None) for ii, item in enumerate(x_pop)]

                results = concurrent_eval(self.objfun,
                                          cases,
                                          comm,
                                          allgather=True,
                                          model_mpi=self.model_mpi)

                fitness[:] = np.inf
                for result in results:
                    returns, traceback = result

                    if returns:
                        val, success, ii = returns
                        if success:
                            fitness[ii] = val
                            nfit += 1

                    else:
                        # Print the traceback if it fails
                        print('A case failed:')
                        print(traceback)

            else:
                # Serial
                for ii in range(self.npop):
                    x = x_pop[ii]
                    fitness[ii], success, _ = self.objfun(x, 0)

                    if success:
                        nfit += 1
                    else:
                        fitness[ii] = np.inf

            # Elitism means replace worst performing point with best from previous generation.
            if elite and generation > 0:
                max_index = np.argmax(fitness)
                old_gen[max_index] = min_gen
                x_pop[max_index] = min_x
                fitness[max_index] = min_fit

            # Find best performing point in this generation.
            min_fit = np.min(fitness)
            min_index = np.argmin(fitness)
            min_gen = old_gen[min_index]
            min_x = x_pop[min_index]

            if min_fit < fopt:
                fopt = min_fit
                xopt = min_x

            # Evolve new generation.
            new_gen = self.tournament(old_gen, fitness)
            new_gen = self.crossover(new_gen, Pc)
            new_gen = self.mutate(new_gen, Pm)

        return xopt, fopt, nfit
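The rounded, centered LHS used above to seed the population yields a balanced spread of 0 and 1 genes across each bit column; a standalone sketch, assuming pyDOE2:

import numpy as np
from pyDOE2 import lhs

lchrom, npop = 6, 4
new_gen = np.round(lhs(lchrom, npop, criterion='center', random_state=1))
print(new_gen)  # a (4, 6) array of 0.0/1.0 genes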
Example #27
0
try:
    nsims = main.nsims
except AttributeError:
    nsims = 5

# import parameter ranges table
loginfo("Reading in lhs parameter range data from <" + dir_path +
        "\input\lhs\lhs_param_ranges_vvwm.csv>.")
param_ranges = pd.read_csv(
    os.path.join(dir_path, "input", "lhs", "lhs_param_ranges_vvwm.csv"))
print(param_ranges)

# create list of input parameter names
param_names = param_ranges["Parameter"].to_list()

# conduct lhs sampling
lhs_design = lhs(n=len(param_names), samples=nsims)
print("LHS Design w/o Uniform: ", "\n", lhs_design.round(2))

for i in range(0, len(param_names)):
    lhs_design[:, i] = param_ranges.loc[i, "Min"] + (lhs_design[:, i]) * (
        param_ranges.loc[i, "Range"])  #JMS 10-20-20
print("Uniformly Sampled from LHS Design: ", "\n", lhs_design)

# convert to data frame
lhs_df = pd.DataFrame(lhs_design, columns=param_names)
print(round(lhs_df, 3))

# write out
loginfo("Writing simulated parameter data to <" + dir_path +
        "\io\lhs_sampled_params_vvwm.csv>.")
lhs_df.to_csv(os.path.join(dir_path, "io", "lhs_sampled_params_vvwm.csv"))
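The per-column scaling loop above can also be written as one vectorized step; a sketch with a hypothetical stand-in for lhs_param_ranges_vvwm.csv:

import pandas as pd
from pyDOE2 import lhs

param_ranges = pd.DataFrame({'Parameter': ['a', 'b'],
                             'Min': [0., 10.],
                             'Range': [1., 5.]})

design = lhs(n=len(param_ranges), samples=4)
design = param_ranges['Min'].values + design * param_ranges['Range'].values
print(pd.DataFrame(design, columns=param_ranges['Parameter'].to_list()))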
Example #28
0
# import parameter ranges table
loginfo("Reading in lhs parameter range data from <" + dir_path +
        r"\input\lhs\lhs_param_ranges.csv>.")
param_ranges = pd.read_csv(
    os.path.join(dir_path, "input", "lhs", "lhs_param_ranges.csv"))
print(param_ranges)

# create list of input parameter names
param_names = param_ranges["Parameter"].to_list()

# parameter conditions:
# por >= fc (happens automatically), fc >= wp, MaxRate>=MinRate

# conduct lhs sampling
lhs_design = lhs(
    n=len(param_names), samples=3 * nsims
)  # take 3x as many samples as needed, in case some don't meet conditions
# uniformly sample
for i in range(0, len(param_names)):
    lhs_design[:, i] = param_ranges.loc[
        i, "Min"] + (lhs_design[:, i]) * (param_ranges.loc[i, "Range"])
# filter on conditions (strict form of fc >= wp and MaxRate >= MinRate)
lhs_design = lhs_design[np.where(
    lhs_design[:, 11] < lhs_design[:, 10])[0]]  # wp < fc
lhs_design = lhs_design[np.where(
    lhs_design[:, 6] < lhs_design[:, 5])[0]]  # MinRate < MaxRate
lhs_design = lhs_design[:nsims]  # only take first nsims rows meeting condition

# convert to data frame
lhs_df = pd.DataFrame(lhs_design, columns=param_names)
print(round(lhs_df, 3))
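The oversample-then-filter trick above generalizes: draw a few times more rows than needed, drop the rows that violate the ordering constraints, and keep the first nsims survivors. A self-contained sketch with made-up columns (note that with very tight constraints, 3x oversampling may not leave enough rows):

import numpy as np
from pyDOE2 import lhs

nsims = 10
design = lhs(n=2, samples=3 * nsims)          # columns: MinRate, MaxRate in [0, 1]
design = design[design[:, 0] < design[:, 1]]  # keep rows with MinRate < MaxRate
design = design[:nsims]
print(len(design), "rows kept")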
Example #29
0
    def run_doe_benchmark(self, log_file_name='ema/results/log_file_doe.txt'):
        if path.exists(log_file_name):
            remove(log_file_name)
        s = '################### Running robustness to doe test ################### \n'

        with open(log_file_name, 'a+') as logfile:
            logfile.write(s)
        print(s)

        test_cases = [('SLSQP', 'full_analytic', False), ('SLSQP', 'semi_analytic_fd', False),
                      ('SLSQP', 'monolythic_fd', True), ('COBYLA', 'derivative_free', False)]
        for test_case in test_cases:
            optimizer = test_case[0]
            derivative_method = test_case[1]
            blackbox = test_case[2]
            prob_type = 'MDA'

            # Set to True to print details for each sample
            print_option = False

            # Number of samples
            N = 50
            res_num_compute = {
                'MDF': [],
                'IDF': [],
                'HYBRID': [],
                'NVH': []
            }

            # Scale to make sure that a solution exists for IDF and HYBRID
            k_os = 2.

            variables = {
                'F_ema': (7.e4 * 0.5, 7.e4 * 2.),
                'N_red': (1., 10.),
                'p': (1.59e-3 * 0.5, 1.59e-3 * 2.),
                'A_max': (2. * 0.5, 2. * 2.)
            }

            # DOE
            doe = lhs(len(variables.keys()), samples=N, criterion='center')

            # Perform an analysis for each sample
            for sample in doe:
                initial_values = copy.copy(variables)
                for i, (key, value) in enumerate(initial_values.items()):
                    upper_bound = value[1]
                    lower_bound = value[0]
                    initial_values[key] = (upper_bound - lower_bound) * sample[i] + lower_bound

                prob_list = [
                    MDFProblem(name='MDF', prob_type=prob_type, optimizer=optimizer,
                               derivative_method=derivative_method, blackbox=blackbox,
                               log_file=log_file_name, scale=k_os, print=print_option),
                    IDFProblem(name='IDF', prob_type=prob_type, optimizer=optimizer,
                               derivative_method=derivative_method, blackbox=blackbox,
                               log_file=log_file_name, scale=k_os, print=print_option),
                    HybridProblem(name='HYBRID', prob_type=prob_type, optimizer=optimizer,
                                  derivative_method=derivative_method, blackbox=blackbox,
                                  log_file=log_file_name, scale=k_os, print=print_option),
                    NVHProblem(name='NVH', prob_type=prob_type, optimizer=optimizer,
                               derivative_method=derivative_method, blackbox=blackbox,
                               log_file=log_file_name, scale=k_os, print=print_option)]

                self.run_analysis(prob_list, initial_values=initial_values)
                
                for prob in prob_list:
                    if prob.post_analysis_results['success']:
                        res_num_compute[prob.name].append(prob.post_analysis_results['num_compute'])
            
            
            # Result analysis
            for key, value in res_num_compute.items():
                s = '> ---------- Running ' + prob_type + ' using ' + key + ' formulation and ' + \
                    optimizer + ' optimizer with ' + derivative_method + \
                    ' at scale ' + str(k_os) + ' ------------ \n'
                # Compute the success rate before padding, so an empty list reports 0 %
                percentage_of_success = len(value) / N * 100.
                if len(value) == 0:
                    value = [0.]  # placeholder so the statistics below do not fail
                max_num_compute = max(value)
                min_num_compute = min(value)
                mean_num_compute = np.mean(value)
                median_num_compute = np.median(value)
                res = 'Max number of evaluations : ' + str(max_num_compute) + '\n' \
                    'Min number of evaluations : ' + str(min_num_compute) + '\n' \
                    'Mean number of evaluations : ' + str(mean_num_compute) + '\n' \
                    'Median number of evaluations : ' + str(median_num_compute) + '\n' \
                    'Percentage of success : ' + str(percentage_of_success) + '\n'
                s += res
                with open(log_file_name, 'a+') as logfile:
                    logfile.write(s)
                print(s)
Example #30
0
def lhsdesign(n, min_range, max_range, k=5, include_vertices=False):
    """Returns the Latin Hypercube Sampling for a given range of values.

    Parameters
    ----------
    n : int
        Number of samples of the hypercube.
    min_range : np.array
        1-by-p or p-by-1 array containing the minimum values for each variable.
    max_range : np.array
        1-by-p or p-by-1 array containing the maximum values for each variable.
    k : int, optional
        Number of iterations to attempt to improve the design.
    include_vertices : bool, optional
        Whether to include the vertices of the hypercube in the sample.

    Returns
    -------
    out : np.array
        n-by-p array containing the Latin Hypercube Sampling.

    Raises
    ------
    ValueError
        If `min_range` or `max_range` is not a 1-D array.

        If `min_range` and `max_range` differ in size.

    """

    # check input range dimensions; both must be 1-D arrays of equal size
    if min_range.ndim != 1 or max_range.ndim != 1:
        raise ValueError("Input ranges must be 1D arrays.")
    if min_range.size != max_range.size:
        raise ValueError(
            "min_range and max_range must have the same number of elements"
        )

    p = min_range.size

    # proceed with normal calculations
    slope = np.tile(max_range - min_range, (n, 1))
    offset = np.tile(min_range, (n, 1))

    # create normalized LH
    x_normalized = lhs(p, samples=n, iterations=k, criterion='maximin')

    if include_vertices:
        vertices = get_vertices(min_range, max_range)

        # scale and return the LH
        return np.vstack((x_normalized * slope + offset, vertices))
    else:
        # scale and return the LH
        return x_normalized * slope + offset
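A usage sketch for the function above (include_vertices left at False, so the get_vertices helper is not needed):

import numpy as np

min_range = np.array([0., 10.])
max_range = np.array([1., 20.])
samples = lhsdesign(8, min_range, max_range)
print(samples)  # shape (8, 2); each column falls within its [min, max] range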
Example #31
0
def DOE(DOE_Seed, LHD_SampleSize, LHD_SamplingStrategy, IDM_path, DPM_path,
        LHD_iterations, RandomiseCFGs, Offset, InfoOnly, make_single_files,
        DPM_base_name, test_mode):
    np.random.seed(DOE_Seed)  # fix the global RNG seed for reproducibility

    print("STARTING\n")

    #Load the Import Design Matrix
    IDM_f = open(IDM_path, encoding='utf-8')
    IDM = []
    csvReader = csv.reader(IDM_f, delimiter="\t")
    i = 0
    for row in csvReader:
        if (i >= 1):
            IDM.append(row)
        i += 1
    IDM_f.close()

    # Convert the numeric columns (min, max, step) to floats
    for row in range(0, len(IDM)):
        IDM[row][1] = float(IDM[row][1])
        IDM[row][2] = float(IDM[row][2])
        IDM[row][3] = float(IDM[row][3])

    #Calculate the size of the full-factorial and the number of LHD parameters
    Indicator_DPM = []  # holds the kind of each parameter in the final DPM
    Factorials = []  # holds the factorial value series, including those of FactPower
    LHD_factors = 0
    for row in range(0, len(IDM)):
        Indicator_DPM.append(IDM[row][4])
        if IDM[row][4] == "LHD":
            LHD_factors += 1
        elif IDM[row][4] == "Factorial" or IDM[row][4] == "Fact":
            val = IDM[row][1]
            tmp = [val]
            while (val + IDM[row][3] <= IDM[row][2]):
                val += IDM[row][3]
                tmp.append(val)
            tmp = np.asarray(tmp)
            #tmp = np.arange(IDM[row][1],IDM[row][2],IDM[row][3])
            Factorials.append([IDM[row][0], tmp])  #parname, #items
        elif IDM[row][4] == "FactPower":
            tmp = []
            pw = int(0)
            while (IDM[row][1] * IDM[row][3]**pw <= IDM[row][2]):
                tmp.append(IDM[row][1] * IDM[row][3]**pw)
                pw += 1
            Factorials.append([IDM[row][0], np.asarray(tmp)])  #parname, #items

    #Calculate the size of the LHD sample part
    SampleMult = 1
    if LHD_factors == 0:
        if (LHD_SampleSize < 0):
            SampleMult = round(-LHD_SampleSize)  # use as a multiplier
        LHD_SampleSize = 0
    elif LHD_factors == 1:
        print("It is not possible to have a single LHD factor.\n")
        print("Instead, consider to make it 'Fact' and add -N 20 (example) \
               to multiply the complete setting by 20.")
        return "ERROR"
    elif LHD_SampleSize < 0:  #implies -N argument
        LHD_SampleSize *= -LHD_factors
        LHD_SampleSize = round(LHD_SampleSize)

    #Calculate the size of the overall sample
    FactSampleSize = 1
    for item in range(len(Factorials)):
        FactSampleSize *= Factorials[item][1].size
    SampleSize = int(max(1, LHD_SampleSize) * FactSampleSize)
    if len(Factorials) > 0:
        print("Factorial design with", len(Factorials), "factors and",
              FactSampleSize, "configurations.\n")
    if LHD_factors > 0:
        print("Latin Hypercube design with", LHD_factors, "factors and",
              LHD_SampleSize, "distinct design points for each\n")
    if SampleMult > 1:
        print("All is multiplied by", SampleMult, ".\nNote: this only makes "
              "sense if at least one random factor exists (like seed)\n")
    if test_mode > 0 and test_mode < SampleSize:
        print("\nTest mode selected. Sample Size is now:", test_mode)
    print("Overal sample size is: ", SampleSize * SampleMult)

    if (InfoOnly != 0):
        print("\nExit. Info only mode.\n")
        return "Done"

    #Create a reduced form design point matrix of the factorial factors.
    Fact_DPM = np.empty([FactSampleSize, len(Factorials)], "float64")
    loops = 1
    for col in range(0, len(Factorials)):
        row = 0
        while row < FactSampleSize:
            for item in range(Factorials[col][1].size):
                loop_count = 0
                while loop_count < loops:
                    Fact_DPM[row][col] = Factorials[col][1][item]
                    loop_count += 1
                    row += 1
        loops *= Factorials[col][1].size  #increase the number of repeated vals
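    # Worked example of the loop above (illustrative): with factor A = {1, 2}
    # entered first and factor B = {10, 20} second, FactSampleSize = 4 and the
    # rows come out as (1, 10), (2, 10), (1, 20), (2, 20) -- the first factor
    # cycles fastest because 'loops' grows as each column is processed.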

    #select sampling method for LHD part
    if LHD_SampleSize > 0:
        print("Using strategy " + LHD_SamplingStrategy + " for the LHS")
        if (LHD_SamplingStrategy == "corr"
                or LHD_SamplingStrategy == "correlation"):
            print("with " + str(LHD_iterations) + " iterations \n")

        #Provide the LHD Matrix as raw
        if LHD_SamplingStrategy == "none":
            LHD_raw = pyDOE2.lhs(LHD_factors,
                                 samples=LHD_SampleSize,
                                 iterations=LHD_iterations)
        else:
            LHD_raw = pyDOE2.lhs(LHD_factors,
                                 samples=LHD_SampleSize,
                                 criterion=LHD_SamplingStrategy,
                                 iterations=LHD_iterations)

        #Print the factor-to-factor correlation matrix
        a = np.corrcoef(LHD_raw, rowvar=False)  # columns (factors) are the variables
        print("Correlation Matrix:")
        print(a)

        #Scale the unit samples to parameter values
        LHD_DPM = np.copy(LHD_raw)
        for row in range(LHD_SampleSize):
            LHD_col = 0
            for col in range(len(Indicator_DPM)):
                if Indicator_DPM[col] == "LHD":
                    LHD_DPM[row][LHD_col] *= (IDM[col][2] - IDM[col][1])  # span (max - min)
                    LHD_DPM[row][LHD_col] += IDM[col][1]  # add minimum
                    #Now, correct to step-size
                    LHD_DPM[row][LHD_col] = round(LHD_DPM[row][LHD_col] /
                                                  IDM[col][3])
                    LHD_DPM[row][LHD_col] *= IDM[col][3]
                    LHD_col += 1
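        # Worked example of the snapping above (illustrative): with min = 0,
        # max = 10 and step = 0.5, a raw LHS value of 0.73 scales to 7.3 and
        # round(7.3 / 0.5) * 0.5 = 15 * 0.5 = 7.5, the nearest grid point.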

    #Provide a "Header" for the DPM with Parameter Names and ConfigID
    DPM_header = []  #header
    for row in range(len(IDM)):
        DPM_header.insert(row, IDM[row][0])
    DPM_header.insert(0, "ABMAT_ConfigID")
    Indicator_DPM.insert(0, "ID")

    #Define the complete Design Point Matrix, un-normalised, samples*(Pars+1)
    DPM = np.empty([SampleSize * SampleMult, len(DPM_header)], "float64")
    row = 0
    for mult in range(SampleMult):  #multiply
        for LHD_row in range(max(LHD_SampleSize, 1)):
            #loop through LHD and for each design-vector add the complete factorial
            #sub-space
            for Fact_row in range(FactSampleSize):
                LHD_item = 0
                Fact_item = 0
                for col in range(len(Indicator_DPM)):
                    if Indicator_DPM[col] == "ID":
                        DPM[row][col] = row + 1  # set the ConfigID, starting at 1
                    elif Indicator_DPM[col] == "LHD":
                        DPM[row][col] = LHD_DPM[LHD_row][LHD_item]
                        LHD_item += 1
                    elif Indicator_DPM[col] == "Factorial" or Indicator_DPM[
                            col] == "FactPower":
                        DPM[row][col] = Fact_DPM[Fact_row][Fact_item]
                        Fact_item += 1
                    elif Indicator_DPM[col] == "Fixed":
                        DPM[row][col] = IDM[col - 1][1]  #Minimum
                    elif Indicator_DPM[col] == "Random":
                        DPM[row][col] = IDM[col - 1][2] - IDM[col -
                                                              1][1]  #Span
                        DPM[row][col] *= np.random.uniform()  #randomise
                        DPM[row][col] += IDM[col - 1][1]  #add minimum
                        #Now, correct to step-size
                        DPM[row][col] = round(DPM[row][col] * IDM[col - 1][3])
                        DPM[row][col] /= IDM[col - 1][3]
                    else:
                        print("Error! Unknown Type of Variable: ", \
                              Indicator_DPM[col] )
                row += 1

    # Shuffle the order? Especially important for distributed "packages" of
    # simulations with factorial designs, where the simulation time may vary
    # with the factorial's values, e.g. scale parameters.
    if (RandomiseCFGs == "Yes"):
        np.random.shuffle(DPM)
        for row in range(SampleSize * SampleMult):
            DPM[row][0] = row + 1 + Offset  # assign the unique configuration ids
    """
    Save the DPM to a tab-separated file
    """

    os.makedirs(os.path.dirname(DPM_path + "\\"),
                exist_ok=True)  #Create dir if necessary
    sample = range(SampleSize * SampleMult)
    if (not make_single_files):
        #Save everything to a tsv file.
        out_file = DPM_path + "\\DPM_" + DPM_base_name + ".tsv"
        DPM_f = open(out_file, 'w', encoding='utf-8', newline='')
        csvWriter = csv.writer(DPM_f, delimiter="\t")
        csvWriter.writerow(DPM_header)
        if test_mode > 0 and test_mode < SampleSize * SampleMult:
            sample = random.sample(sample, test_mode)
        for row in sample:
            csvWriter.writerow(DPM[row])
        DPM_f.close()
    else:
        #create one tsv file per configuration
        if test_mode > 0 and test_mode < SampleSize * SampleMult:
            sample = random.sample(sample, test_mode)
            print(sample)
        for row in sample:
            out_file = DPM_path + "\\DPM_" + DPM_base_name + "_" + str(
                int(DPM[row][0])) + ".tsv"
            DPM_f = open(out_file, 'w', encoding='utf-8', newline='')
            csvWriter = csv.writer(DPM_f, delimiter="\t")
            csvWriter.writerow(DPM_header)
            csvWriter.writerow(DPM[row])
            DPM_f.close()

    return "DoneA"
Example #32
0
def test_notebook_4():
    print("\n Notebook4 tested...")
    # Define SPM design parameters
    d_e = PositiveParameter("d_e", [50, 500], "mm", "External stator diameter")
    d_i = PositiveParameter("d_i", [20, 300], "mm", "Internal stator diameter")
    e_tooth = PositiveParameter("e_tooth", [3, 60], "mm", "Tooth thickness")
    e_yoke = PositiveParameter("e_yoke", [2, 20], "mm", "Yoke thickness")
    w_pm = PositiveParameter("w_pm", [2, 20], "mm", "Permanent magnet width")
    r_i = PositiveParameter("r_i", [5, 100], "mm", "Rotor internal radius")
    j = PositiveParameter("j", [0.1, 1000], "A/m**2",
                          "Winding current density")
    B_R = PositiveParameter("B_R", [1.1], "tesla",
                            "Permanent magnet remanence")
    B_SAT = PositiveParameter("B_SAT", [0.02], "tesla",
                              "Iron induction saturation")
    MU_0 = PositiveParameter("MU_0", [1.26e-6], "H/m", "Vacuum permeability")
    t_l = PositiveParameter("t_l", [0.01, 100], "N", "Linear torque")
    parameter_set = PositiveParameterSet(d_e, d_i, e_tooth, e_yoke, w_pm, r_i,
                                         j, B_R, B_SAT, MU_0, t_l)
    # Perform dimensional analysis on linear torque
    pi_set, _ = buckingham_theorem(parameter_set, False)
    # Define new parameters for joule losses definition
    p_jl = PositiveParameter("p_jl", [0.01, 1000], "W/m",
                             "Linear joule losses")
    RHO_WIND = PositiveParameter("RHO_WIND", [17000], "ohm*m",
                                 "Linear winding resistivity")
    s_wind = PositiveParameter("s_wind", [1, 100], "mm**2",
                               "Winding total cross section")
    parameter_set = PositiveParameterSet(d_e, d_i, e_tooth, e_yoke, w_pm, r_i,
                                         j, B_R, B_SAT, MU_0, RHO_WIND, s_wind,
                                         p_jl)
    # Perform dimensional analysis on joule losses
    pi_set, _ = buckingham_theorem(parameter_set, False)
    # Calculate levels
    bounds = numpy.array([[30, 150], [10, 100], [750, 3000]])
    doe_levels = lhs(3, samples=27, criterion="maximin", random_state=42)
    doe = bounds[:, 0] + doe_levels / doe_levels.max(axis=0) * (bounds[:, 1] -
                                                                bounds[:, 0])
    # Show matrix
    doe_data = pandas.DataFrame(doe, columns=["d_e", "h", "omega_max"])
    doe_data.to_excel("output.xls")
    os.remove("output.xls")
    doe_data.head(n=numpy.shape(doe)[0])
    # Plot 3D figure
    fig = plot.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.scatter(doe[:, 0], doe[:, 1], doe[:, 2])
    ax.set_xlabel("$D_e$")
    ax.set_ylabel("$h$")
    ax.set_zlabel(r"$\omega_{max}$")
    doe = pandas.read_excel("notebooks/04_motor_example/output.xls")
    # Declare directly the pi_set problem
    doePI = doe[[
        "pi01", "pi02", "pi03", "pi1", "pi2", "pi3", "pi4", "pi5", "pi6"
    ]].values
    pi1 = PositiveParameter("pi1", [0.1, 1], "", "t_l*b_r**-1*j**-1*d_e**-3")
    pi2 = PositiveParameter("pi2", [0.1, 1], "",
                            "p_j*rho_win**-1*d_e**-2*j**-2")
    pi3 = PositiveParameter(
        "pi3", [0.1, 1], "",
        "p_fe*delta_p**-1*omega_max**1.5*b_r**-2*d_iron**-1*d_e**-2")
    pi4 = PositiveParameter("pi4", [0.1, 1], "", "mu_0*j*d_e*b_r**-1")
    pi5 = PositiveParameter("pi5", [0.1, 1], "", "d_i*d_e**-1")
    pi6 = PositiveParameter("pi6", [0.1, 1], "", "e_tooth*d_e**-1*n")
    pi7 = PositiveParameter("pi7", [0.1, 1], "", "e_yoke*d_e**-1*n")
    pi8 = PositiveParameter("pi8", [0.1, 1], "", "w_pm*d_e**-1")
    pi9 = PositiveParameter("pi9", [0.1, 1], "", "r_i*d_e**-1")
    pi_set = PositiveParameterSet(pi1, pi2, pi3, pi4, pi5, pi6, pi7, pi8, pi9)
    # Perform sensitivity analysis
    pi_sensitivity(pi_set, doePI, useWidgets=False, test_mode=True)
    # Perform dependency analysis
    pi_dependency(pi_set, doePI, useWidgets=False, test_mode=True)