Example #1
def obtenerTratamientos(niveles):
    num_factores = len(niveles)
    niveles_factor = {}
    for k in niveles:
        niveles_factor[k] = len(niveles[k])
    tratamientos = fullfact(list(niveles_factor.values()))  # list() needed on Python 3: fullfact indexes its argument
    return tratamientos
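A quick orientation for all of the examples on this page: pyDOE.fullfact takes a list of level counts and returns a float matrix with one row per treatment combination, whose entries are 0-based level indices rather than the factor values themselves. A minimal sketch:

import pyDOE

# fullfact returns one row per treatment; entries are level *indices*
# (0-based floats), so callers map them back onto the actual values.
design = pyDOE.fullfact([2, 3])  # 2 x 3 factorial -> 6 rows, 2 columns
print(design.shape)              # (6, 2)
print(design)                    # first factor varies fastest: [0, 0], [1, 0], [0, 1], ...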
Example #2
def FactorialGenerator(levels, VarRanges):
    # for each variable, pass the number of levels you want
    sampling = pyDOE.fullfact(levels)
    Designs = np.zeros([len(sampling), len(levels)])
    for i in range(0, len(levels)):
        Designs[:, i] = VarRanges[i, 0] + sampling[:, i] * (VarRanges[i, 1] -
                                                            VarRanges[i, 0])
    return Designs
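Note that this generator multiplies the raw level index by the range width, so outputs span [min, min + (levels_i - 1) * (max - min)] rather than [min, max]. If the intent is to spread levels evenly across each variable's range, the indices are usually normalized by levels_i - 1 first, as Example #8 does. A sketch of that variant (the function name is mine, and every level count is assumed to be >= 2 so the division is defined):

import numpy as np
import pyDOE

def factorial_generator_scaled(levels, var_ranges):
    # Map fullfact level indices evenly onto [min, max] per variable.
    sampling = pyDOE.fullfact(levels)
    var_ranges = np.asarray(var_ranges, dtype=float)
    denom = np.asarray(levels, dtype=float) - 1.0
    unit = sampling / denom  # level index -> [0, 1]
    return var_ranges[:, 0] + unit * (var_ranges[:, 1] - var_ranges[:, 0])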
Example #3
def two_effect_data(e1=2, e2=2, n=3, **kwargs):
    x = np.linspace(-1, 1)[:, None]
    # y = np.zeros((50,(e1+e2)*n))
    effect = np.array(pyDOE.fullfact([e1, e2]).tolist() * n).astype(int)
    y = np.zeros((50, effect.shape[0]))

    m = gpfanova.fanova.FANOVA(x, y, effect, **kwargs)
    y, f_samples = m.samplePrior()

    return x, y, effect, f_samples
Example #4
def generate(conf):
    """Generate base design by calling pyDOE"""
    # build list containing number levels for each factor (argument to pyDOE)
    levels = list()
    for factor in conf["factors"].keys():
        levels.append(len(conf["factors"][factor]))
    # call pyDOE method corresponding to conf["design"]
    if conf["design"] == "fullfact":
        base = pyDOE.fullfact(levels)
    return base
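The expected conf layout is implied rather than shown; a plausible call, assuming "factors" maps factor names to their candidate values:

conf = {
    "design": "fullfact",
    "factors": {
        "temperature": [20, 40, 60],
        "pressure": [1, 2],
    },
}
base = generate(conf)  # 3 x 2 = 6 rows of level indices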
Example #6
 def init(self):
     num_levels = []
     dvar = self.param_variation_model.design_variables
     for attr_name, variability in dvar.items():
         if not isinstance(
                 variability,
             (memosampler.NumericalLevels, memosampler.NonNumericalLevels)):
             raise ValueError('FullFactorial can only handle '
                              '"NumericalLevels"  or "NonNumericalLevels" '
                              'types of variability')
         num_levels.append(len(variability.levels))
     self.design = doe.fullfact(num_levels)
Example #8
 def fullfact(self,name=None,levels=[]):
     try:
         import pyDOE
     except ImportError as exc:
         sys.stderr.write("Warning: failed to import pyDOE module. ({})".format(exc))
         return
     if len(levels) == 0:
         levels = numpy.array([p.nvals for p in self.pars.values()])
     elif len(levels) != len(self.pars):
         print("Error: Length of levels ({}) not equal to number of parameters ({})".format(len(levels), len(self.pars)))
         return
     else:
         levels = numpy.array(levels)
     ds = pyDOE.fullfact(levels)
     mns = numpy.array(self.parmins)
     mxs = numpy.array(self.parmaxs)
     parsets = mns + ds/(levels-1)*(mxs-mns)
     return self.create_sampleset(parsets, name=name)
Example #9
 def doematrix(self):
     nf = self.spinBox.value()
     self.appExcel = xw.App()
     wb = xw.books.active
     sht = wb.sheets[0]
     if self.factorialradioButton.isChecked():
         mdoe = ff2n(nf)
     elif self.fullradioButton.isChecked():
         seq = self.lineEdit.text()  # text is a method on QLineEdit; call it
         seq = seq.split(',')
         factors = len(seq)
         levels = []
         for i in range(factors):
             levels.append(int(float(seq[i])))  # append the int directly, not a one-element set
         mdoe = fullfact(levels)
     elif self.pbradioButton.isChecked():
         mdoe = pbdesign(nf)
     sht.range('A1').value = mdoe
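Stripped of the GUI plumbing, the xlwings write pattern above reduces to the following sketch (workbook handling is simplified):

import xlwings as xw
from pyDOE import fullfact

app = xw.App()                     # start an Excel instance
sht = xw.books.active.sheets[0]    # first sheet of the active workbook
sht.range('A1').value = fullfact([2, 3, 2])  # dump the design matrix at A1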
Example #10
    def generate(self, out_name='parameters.csv'):
        if os.path.exists(out_name):
            self.df = pd.read_csv(out_name)
        else:
            doe_option = self.DOE
            if doe_option == "one-to-one":
                self.max_len = max(
                    [len(para.values) for para in self.para_list])
                # map() is lazy in Python 3, so its side effects never run;
                # iterate explicitly instead
                for para in self.para_list:
                    self.append_one2one(para)

            elif doe_option == "factorial":
                index_array = pyDOE.fullfact(
                    [len(para.values) for para in self.para_list])
                for ip, para in enumerate(self.para_list):
                    para.run_values = [
                        para.values[int(x)] for x in index_array[:, ip]
                    ]

            elif "lhs" in doe_option:
                nsamples = int(doe_option.split(',')[1])
                #scales = find_precision(nsamples)[0]
                scales = 5
                frac_array = pyDOE.lhs(
                    len(self.para_list),
                    samples=nsamples)  ## CDF values for norm(0, 1)
                for ip, para in enumerate(self.para_list):
                    para.run_values = [
                        round(para.min_ + x * (para.max_ - para.min_), scales)
                        for x in frac_array[:, ip]
                    ]

            else:
                print("I do nothing.")

            data = dict([(para.nickname, para.run_values)
                         for para in self.para_list])
            self.df = pd.DataFrame(data=data)
            self.df.to_csv(out_name, index=False)
        print("\n****Generated List of Parameters***")
        print(self.df.head())
        print("\n")
        return self.df.shape
Example #11
def multiple_effects(effects=[2, 2],
                     m=3,
                     n=50,
                     fullFactorial=True,
                     seed=False,
                     **kwargs):
    x = np.linspace(-1, 1, n)[:, None]
    # y = np.zeros((50,(e1+e2)*n))

    if seed:
        np.random.seed(123)

    if fullFactorial:
        effect = np.array(pyDOE.fullfact(effects).tolist() * m).astype(int)
    else:
        effect = np.array([[np.random.choice(range(e)) for e in effects]
                           for i in range(m)])
    y = np.zeros((n, effect.shape[0]))

    m = gpfanova.fanova.FANOVA(x, y, effect, **kwargs)  # note: rebinds the parameter m
    y, f_samples = m.samplePrior()

    return x, y, effect, f_samples
Example #12
import pyDOE as doe
import numpy as np
import pandas as pd
from helperFunction.Func import createSeries

if __name__ == "__main__":
    layerThickness = range(150, 225, 25)
    linearVelocity = [25, 50, 75]
    rot = range(10, 180, 30)

    num = [len(layerThickness), len(linearVelocity), len(rot)]
    spread = np.array(doe.fullfact(num))

    layerThicknessSeries = createSeries(spread[:, 0], layerThickness)
    linearVelocitySeries = createSeries(spread[:, 1], linearVelocity)
    rotSeries = createSeries(spread[:, 2], rot)

    d = {
        'layer_thickness': layerThicknessSeries,
        'linear_velocity': linearVelocitySeries,
        'rotation': rotSeries
    }

    df = pd.DataFrame(d)

    writer = pd.ExcelWriter('./bin/BinderJet.xlsx')
    df.to_excel(writer, 'Sheet1')
    writer.save()
    print(df)
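createSeries is imported from a project helper and is not shown; a minimal reconstruction consistent with its use here (the name and signature are assumptions) would map each column of 0-based level indices onto the corresponding factor values:

def create_series(index_column, values):
    # hypothetical helper: look up each level index in the value list
    values = list(values)
    return [values[int(i)] for i in index_column]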
Example #13
def test_fullfact_numpy_float64_issue():
    doe.fullfact([2]*3)
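This regression test only checks that the call does not raise. A slightly more informative variant (my addition) would also assert the expected shape of a 2^3 design:

import numpy as np
import pyDOE as doe

def test_fullfact_shape():
    design = doe.fullfact([2] * 3)       # 2^3 full factorial
    assert design.shape == (8, 3)        # all eight corner combinations
    assert set(np.unique(design)) == {0.0, 1.0}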
Example #14
def exhaustive(parameters, translator, tile_fixed_values):
	"""
	This function generates the doe plan for the experiment using of the whole design space

	Parameters:
		- parameters: the list of parameters generated by build_doe_space
		- translator: the translator dictionary generated by build_doe_space
		- tile_fixed_values: dictionary that maps a tile parameter either to a fixed value or to the name of the tile it must match

	Return:
		- the plan of the execution composed as a dictionary:
			keys: the name of the build
			values: a list of tuples, where each tuple represents a single experiment
	"""

	# do not enforce constraints on parameter levels

	# count how many parameters have at least two levels
	dynamic_parameters = []
	constant_parameters = []
	levels = []
	for index, p in enumerate(parameters):
		if len(p) >= 2:
			dynamic_parameters.append(index)
			levels.append(len(p))
		else:
			constant_parameters.append(index)

	# prune the tile space
	constrained_parameters = {}
	for tile_p in tile_fixed_values:
		try:
			# consider the case where the tile size is fixed
			value = int(tile_fixed_values[tile_p])
			if translator['optimizations'][tile_p] in dynamic_parameters:
				dynamic_parameters.remove(translator['optimizations'][tile_p])
				constant_parameters.append(translator['optimizations'][tile_p])
				parameters[translator['optimizations'][tile_p]] = [value]

		except ValueError:
			# consider the case where the tile value depends on another tile
			if translator['optimizations'][tile_p] in dynamic_parameters:
				dynamic_parameters.remove(translator['optimizations'][tile_p])
				constrained_parameters[tile_p] = translator['optimizations'][tile_fixed_values[tile_p]]



	# generate the experiment matrix
	experiments = doe.fullfact(levels)

	# compose the plan
	plan = {}
	for e in experiments:

		# get the value of all the dynamic values
		values = {}
		for index, v in enumerate(e):
			real_index = dynamic_parameters[index]
			values[real_index] = str(parameters[real_index][int(v)])

		# add back the tile values
		for tile_p in constrained_parameters:
			values[translator['optimizations'][tile_p]] = values[constrained_parameters[tile_p]]

		# add the constants
		for c in constant_parameters:
			values[c] = str(parameters[c][0]) # TODO: MPIbuild, take from MPI ?
			if values[c] == 'omp_threads':
				values[c] = values[translator['omp_threads']['omp']]
			if values[c] == 'mpi_procs':
				values[c] = values[translator['mpi_procs']['mpi']]

		# create the run list with the right values
		run = ['' for x in values]
		for key in values:
			run[int(key)] = values[key]


		# perform the flag translation
		make_args, exe_args, openmp_flag, mpi_command, name1, name2 = value_to_flag_translation(run, translator)

		# add it to the plan
		try:
			plan[make_args].append((exe_args, openmp_flag, mpi_command, name1, name2))
		except KeyError:
			plan[make_args] = [(exe_args, openmp_flag, mpi_command, name1, name2)]

	# return the plan
	return plan
Example #15
def compute_stat(list_points,
                 weights,
                 Nbin=200,
                 rangeBin=None,
                 DoInterp=True,
                 DoHisto=False):
    """
    Compute the marginalized proba 1D and 2D for N-dimensions 
    - with an interpolation on a grid option
    - or simple histogram option
    
    Note
    ----
    - Main fuction for the triangular plot (c.f. plot_triangular) 
    
    Parameters
    ----------
    list_points : array_like[nPoints,NDIM,]
        Contain the sample of points.
    weights : array_like[nPoints,]
        The weights of each points.
    Nbin : Optional[int]
        Dimention of the final grid.
    DoInterp : Optional[bool]
        Interpolate the weights on a grid of dimention power(Nbin,NDIM)
    DoHisto : Optional[bool]
        Project on the weights on a grid of dimention power(Nbin,NDIM) 
    rangeBin : Optional[array_like[NDIM,2,]]
        The range of each dimentions. 
    
    Returns
    -------
    list_bins :
        list of the bins used for each axis.
    ND_pdf : 
        The power(Nbin,NDIM) cube of the pdf
    TwoD_proba :
        All the 2D pdf.
    OneD_proba :
        All the 1D pdf.
    axe_marg_2D.astype(int) :
        The order of axis used in the 2D pdf.
    axe_marg_1D.astype(int) :
        The order of axis used in the 1D pdf. 
    
    Raises
    ------
    None
    """

    DIM = len(list_points)

    ### Create bins of each dim
    list_bins = np.zeros([DIM, Nbin])
    for i, pts in enumerate(list_points):
        # rangeBin is None -> infer each axis range from the data
        if rangeBin is None:
            list_bins[i] = np.linspace(pts.min(), pts.max(), num=Nbin)
        else:
            list_bins[i] = np.linspace(rangeBin[0, i],
                                       rangeBin[1, i],
                                       num=Nbin)

    if DoInterp:
        ### FOR GRID ESTIMATIONS
        ### ND mesh
        YYY = np.mgrid[[
            slice(m, M, Nbin * 1j)
            for m, M in zip(list_bins.min(axis=1), list_bins.max(axis=1))
        ]]

        ### ND parametter interpolate space
        ND_proba = griddata(list_points.T,
                            weights,
                            YYY.T,
                            method='linear',
                            fill_value=0.,
                            rescale=True)  ### nearest ### linear
    elif DoHisto:
        ### FOR MCMC CHAINS
        #print("IN DO HISTOGRAM")
        list_bins_tmp = np.zeros([DIM, Nbin + 1])
        for i, pts in enumerate(list_points):
            if rangeBin is None:
                list_bins_tmp[i] = np.linspace(pts.min(),
                                               pts.max(),
                                               num=Nbin + 1)
            else:
                list_bins_tmp[i] = np.linspace(rangeBin[0, i],
                                               rangeBin[1, i],
                                               num=Nbin + 1)
        ND_proba, _ = np.histogramdd(list_points.T,
                                     weights=weights,
                                     bins=list_bins_tmp)
        ND_proba = ND_proba.T
    else:
        ### WHEN THE PROBA IS ALREADY COMPUTED
        #ND_proba = weights.reshape( [Nbin, Nbin, Nbin, Nbin, Nbin] )
        ND_proba = weights.reshape(np.ones(DIM, dtype=int) * Nbin)

    ND_proba[np.isnan(ND_proba)] = 0.  ### just in case
    print('NORMALIZATION FACTOR :', ND_proba.sum())
    ND_pdf = ND_proba.T / ND_proba.sum()  ### normalisation

    ### 2D planes : marginalised proba = n(n-1)/2
    TwoD_proba = np.zeros([DIM * (DIM - 1) // 2, Nbin, Nbin])
    axe_marg_2D = np.zeros([DIM * (DIM - 1) // 2, 2])
    t = pyDOE.fullfact(np.arange(3, DIM + 1))
    if DIM > 3:
        select = np.where(
            np.prod(np.array([(t[:, d] < t[:, d + 1])
                              for d in range(t.shape[1] - 1)]),
                    axis=0))[0]
        tselect = t[select]
    else:
        tselect = t
    for i, ax in enumerate(tselect):
        TwoD_proba[i] = np.squeeze(
            np.apply_over_axes(np.sum, ND_pdf, ax.astype(int)))
        axe_marg_2D[i] = np.setdiff1d(range(DIM), ax)

    ### 1D : marginalised proba = n
    OneD_proba = np.zeros([DIM, Nbin])
    axe_marg_1D = np.zeros(DIM)
    t = pyDOE.fullfact(np.arange(2, DIM + 1))
    if DIM > 2:
        select = np.where(
            np.prod(np.array([(t[:, d] < t[:, d + 1])
                              for d in range(t.shape[1] - 1)]),
                    axis=0))[0]
        tselect = t[select]
    else:
        tselect = t
    for i, ax in enumerate(tselect):
        OneD_proba[i] = np.squeeze(
            np.apply_over_axes(np.sum, ND_pdf, ax.astype(int)))

        axe_marg_1D[i] = np.setdiff1d(range(DIM), ax)

    return list_bins, ND_pdf, TwoD_proba, OneD_proba, axe_marg_2D.astype(
        int), axe_marg_1D.astype(int)
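The two fullfact calls in this function are used only to enumerate the axis subsets to sum over (rows are then filtered down to strictly increasing tuples). Under that reading, itertools.combinations yields the same subsets directly; a sketch for DIM = 4:

import itertools

DIM = 4
# axes summed over for each 2D marginal: every (DIM - 2)-subset of the axes
two_d = list(itertools.combinations(range(DIM), DIM - 2))
# -> [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)], i.e. DIM*(DIM-1)/2 entries

# axes summed over for each 1D marginal: every (DIM - 1)-subset
one_d = list(itertools.combinations(range(DIM), DIM - 1))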
Example #16
##opt.set_inequality_constraint()
#print opt.optimize(params)
from scipy.optimize import *
#print ARLogLikelihood(t, s, p)

#print fmin_slsqp(objective, red_params(init_params), fprime=objective_grad, f_ieqcons=constraint, iter=1000)
#print minimize(loglikelihood, np.random.rand(Np), jac=lambda x: loglikelihood(x, True), method='BFGS', options={'maxiter':100})

bounds = np.vstack((
    np.hstack((-10 * np.ones((p, 1)), 10 * np.ones((p, 1)))),
    np.hstack((0 * np.ones((n - 1, 1)), 1 * np.ones((n - 1, 1)))),
    np.array([[0.1, 10]]),
))

npoints = 10
points = np.linspace(0, 1, npoints + 2)[1:-1]
import pyDOE
designs = pyDOE.fullfact([npoints] * (n + p)).astype(np.int32)
data = []
import pickle as pkl  # cPickle is Python 2 only
params = [red_params(init_params)]
for i, des in enumerate(designs):
    print(i)
    param = []
    for j in range(0, n + p):
        param.append(bounds[j][0] + points[des[j]] *
                     (bounds[j][1] - bounds[j][0]))
    if constraint(param) < 0:
        continue
    #result = objective(np.array(param))
    try:
        result = objective(np.array(param))
        data.append([param, result, constraint(param)])
    except Exception:
        # the snippet is truncated in the source; skipping failed
        # evaluations here is an assumption made so the loop body parses
        continue
Example #17
def DesOpt(SysEq, x0, xU, xL, xDis=[], gc=[], hc=[], SensEq=[], Alg="SLSQP", SensCalc="FD", DesVarNorm=True,
           deltax=1e-3, StatusReport=False, ResultReport=False, Video=False, DoE=False, SBDO=False,
           Debug=False, PrintOut=True, OptNameAdd="", AlgOptions=[], Alarm=True):

#-----------------------------------------------------------------------------------------------------------------------
# Define optimization problem and optimization options
#-----------------------------------------------------------------------------------------------------------------------
    """
    :type OptNode: object
    """
    if Debug is True:
        if StatusReport is True:
            print("Debug is set to True; overriding StatusReport")
        StatusReport = False
        if ResultReport is True:
            print("Debug is set to True; overriding ResultReport")
        ResultReport = False
    computerName = platform.uname()[1]
    operatingSystem = platform.uname()[0]
    architecture = platform.uname()[4]
    nProcessors = str(multiprocessing.cpu_count())
    userName = getpass.getuser()
    OptTime0 = time.time()
    OptNodes = "all"
    MainDir = os.getcwd()
    if operatingSystem  != 'Windows':
        DirSplit = "/"
        homeDir = "/home/"
    else:
        DirSplit = "\\"
        homeDir = "c:\\Users\\"
    OptModel = os.getcwd().split(DirSplit)[-1]
    try:
        OptAlg = eval("pyOpt." + Alg + '()')
        pyOptAlg = True
    except:
        OptAlg = Alg
        pyOptAlg = False
    if hasattr(SensEq, '__call__'):
        SensCalc = "OptSensEq"
        print "Function for sensitivity analysis has been provided, overriding SensCalc to use function"
    else:
        pass
    StartTime = datetime.datetime.now()
    loctime = time.localtime()
    today = time.strftime("%B", time.localtime()) + ' ' + str(loctime[2]) + ', ' + str(loctime[0])
    if SBDO is True:
        OptNameAdd = OptNameAdd + "_SBDO"
    OptName = OptModel + OptNameAdd + "_" + Alg + "_" + StartTime.strftime("%Y%m%d%H%M%S")
    global nEval
    nEval = 0
    LocalRun = True
    ModelDir = os.getcwd()[:-(len(OptModel) + 1)]
    ModelFolder = ModelDir.split(DirSplit)[-1]
    DesOptDir = ModelDir[:-(len(ModelFolder) + 1)]
    ResultsDir = DesOptDir + os.sep + "Results"
    RunDir = DesOptDir + os.sep + "Run"
    try:
        inform
    except NameError:
        inform = ["Running"]
    if LocalRun is True and Debug is False:
        try: os.mkdir(ResultsDir)
        except: pass
        os.mkdir(ResultsDir + DirSplit + OptName)
        os.mkdir(ResultsDir + os.sep + OptName + os.sep + "ResultReport" + os.sep)
        shutil.copytree(os.getcwd(), RunDir + os.sep + OptName)
    #if SensCalc == "ParaFD":
    #    import OptSensParaFD
    #    os.system("cp -r ParaPythonFn " + homeDir + userName + "/DesOptRun/" + OptName)
    if LocalRun is True and Debug is False:
        os.chdir("../../Run/" + OptName + "/")
    sys.path.append(os.getcwd())


#-----------------------------------------------------------------------------------------------------------------------
#       Print start-up splash to output screen
#-----------------------------------------------------------------------------------------------------------------------
    if PrintOut is True:
        print("--------------------------------------------------------------------------------")
        PrintDesOptPy()
        print("")
        print("Optimization model:      " + OptModel)
        try: print("Optimization algorithm:  " + Alg)
        except: pass
        print("Optimization start:      " + StartTime.strftime("%Y%m%d%H%M"))
        print("Optimization name:       " + OptName)
        print("--------------------------------------------------------------------------------")



#-----------------------------------------------------------------------------------------------------------------------
#       Optimization problem
#-----------------------------------------------------------------------------------------------------------------------

#-----------------------------------------------------------------------------------------------------------------------
#       Define functions: system equation, normalization, etc.
#-----------------------------------------------------------------------------------------------------------------------
    def OptSysEq(x):
        x = np.array(x)  # NSGA2 gives a list back, this makes a float! TODO Inquire why it does this!
        f, g = SysEq(x, gc)
        fail = 0
        global nEval
        nEval += 1
        if StatusReport == True:
            OptHis2HTML.OptHis2HTML(OptName, OptAlg, DesOptDir, xL, xU, DesVarNorm, inform[0], OptTime0)
        if len(xDis) > 0:
            nD = len(xDis)
            gDis = [[]]*2*nD
            for ii in range(nD):
                gDis[ii+0] = np.sum(x[-1*xDis[ii]:])-1
                gDis[ii+1] = 1-np.sum(x[-1*xDis[ii]:])
            gNew = np.concatenate((g, gDis), 0)
            g = copy.copy(gNew)
        # TODO add print out for optimization development!!
        return f, g, fail

    def OptSysEqNorm(xNorm):
        xNorm = np.array(xNorm)  # NSGA2 gives a list back, this makes a float! TODO Inquire why it does this!
        x = denormalize(xNorm, xL, xU, DesVarNorm)
        f, g, fail = OptSysEq(x)
        return f, g, fail

    def OptPenSysEq(x):
        f, g, fail = OptSysEq(x)
        fpen = f
        return fpen

    def OptSensEq(x, f, g):
        dfdx, dgdx = SensEq(x, f, g, gc)
        dfdx = dfdx.reshape(1,len(x))
        fail = 0
        return dfdx, dgdx, fail

    def OptSensEqNorm(xNorm, f, g):
        x = denormalize(xNorm, xL, xU, DesVarNorm)
        dfxdx, dgxdx, fail = OptSensEq(x, f, g)
        dfdx = dfxdx * (xU - xL)
        # TODO not general for all normalizations! needs to be rewritten
        if dgxdx != []:
            dgdx = dgxdx * np.tile((xU - xL), [len(g), 1])
            # TODO not general for all normalizations! needs to be rewritten
        else:
            dgdx = []
        return dfdx, dgdx, fail

    def OptSensEqParaFD(x, f, g):
        global nEval
        dfdx, dgdx, nb = OptSensParaFD.Para(x, f, g, deltax, OptName, OptNodes)
        nEval += nb
        fail = 0
        return dfdx, dgdx, fail

    def OptSensEqParaFDNorm(xNorm, f, g):
        x = denormalize(xNorm, xL, xU, DesVarNorm)
        dfxdx, dgxdx, fail = OptSensEqParaFD(x, f, g)
        dfdx = dfxdx * (xU - xL)
        # TODO not general for all normalizations! needs to be rewritten
        dgdx = dgxdx * (np.tile((xU - xL), [len(g), 1]))
        # TODO not general for all normalizations! needs to be rewritten
        return dfdx, dgdx, fail

#-----------------------------------------------------------------------------------------------------------------------
#       Surrogate-based optimization (not fully functioning yet!!!!)
#-----------------------------------------------------------------------------------------------------------------------
    # TODO SBDO in a separate file???
    if SBDO is not False:
        if DoE > 0:
            import pyDOE
            try:
                n_gc = len(gc)
            except:
                n_gc = 1
            SampleCorners = True
            if SampleCorners is True:
                xTemp = np.ones(np.size(xL)) * 2
                xSampFF = pyDOE.fullfact(np.array(xTemp, dtype=int))  # Kriging needs boundaries too!!
                xSampLH = pyDOE.lhs(np.size(xL), DoE)
                xDoE_Norm = np.concatenate((xSampFF, xSampLH), axis=0)
            else:
                xDoE_Norm = pyDOE.lhs(np.size(xL), DoE)
            xDoE = np.zeros(np.shape(xDoE_Norm))
            fDoE = np.zeros([np.size(xDoE_Norm, 0), 1])
            gDoE = np.zeros([np.size(xDoE_Norm, 0), n_gc])
            for ii in range(np.size(xDoE_Norm, 0)):
                xDoE[ii] = denormalize(xDoE_Norm[ii], xL, xU, DesVarNorm)
                fDoEii, gDoEii, fail = OptSysEqNorm(xDoE_Norm[ii])
                fDoE[ii] = fDoEii
                gDoE[ii, :] = gDoEii
            n_theta = np.size(x0) + 1
            ApproxObj = "QuadReg"
            ApproxObj = "GaussianProcess"
            if ApproxObj == "GaussianProcess":
                from sklearn.gaussian_process import GaussianProcess

                approx_f = GaussianProcess(regr='quadratic', corr='squared_exponential',
                                           normalize=True, theta0=0.1, thetaL=1e-4, thetaU=1e+1,
                                           optimizer='fmin_cobyla')
            elif ApproxObj == "QuadReg":
                #                from PolyReg import *
                approx_f = PolyReg()
            approx_f.fit(xDoE, fDoE)
            from sklearn.gaussian_process import GaussianProcess

            gDoEr = np.zeros(np.size(xDoE_Norm, 0))
            approx_g = [[]] * n_gc
            gpRegr = ["quadratic"] * n_gc
            gpCorr = ["squared_exponential"] * n_gc
            for ii in range(n_gc):
                for iii in range(np.size(xDoE_Norm, 0)):
                    gDoEii = gDoE[iii]
                    gDoEr[iii] = gDoEii[ii]
                approx_g[ii] = GaussianProcess(regr=gpRegr[ii], corr=gpCorr[ii], theta0=0.01,
                                               thetaL=0.0001, thetaU=10., optimizer='fmin_cobyla')
                approx_g[ii].fit(xDoE, gDoEr)
            DoE_Data = {}
            DoE_Data['xDoE_Norm'] = xDoE_Norm
            DoE_Data['gDoE'] = gDoE
            DoE_Data['fDoE'] = fDoE
            output = open(OptName + "_DoE.pkl", 'wb')
            pickle.dump(DoE_Data, output)
            output.close()
        else:
            Data = pickle.load(open("Approx.pkl"))
            approx_f = Data["approx_f"]
            approx_g = Data["approx_g"]

    def ApproxOptSysEq(x):
        f = approx_f.predict(x)
        g = np.zeros(len(gc))
        for ii in range(len(gc)):
            # exec("g[ii], MSE = gp_g"+str(ii)+".predict(x, eval_MSE=True)")
            g[ii] = approx_g[ii].predict(x)
        # sigma = np.sqrt(MSE)
        fail = 0
        return f, g, fail

    def ApproxOptSysEqNorm(xNorm):
        xNorm = xNorm[0:np.size(xL), ]
        x = denormalize(xNorm, xL, xU, DesVarNorm)
        f = approx_f.predict(x)
        g = np.zeros(len(gc))
        for ii in range(len(gc)):
            # exec("g[ii], MSE = gp_g"+str(ii)+".predict(x, eval_MSE=True)")
            g[ii] = approx_g[ii].predict(x)
        # sigma = np.sqrt(MSE)
        fail = 0
        return f, g, fail
    if len(xDis) > 0:  # "is not []" compares identity and is always True; test contents instead
        for ii in range(np.size(xDis, 0)):
            xExpand0 = np.ones(xDis[ii]) * 1./xDis[ii]   # Start at uniform of all materials etc.
            xNew0 = np.concatenate((x0, xExpand0), 0)
            xExpandL = np.ones(xDis[ii]) * 0.0001
            xNewL = np.concatenate((xL, xExpandL), 0)
            xExpandU = np.ones(xDis[ii])
            xNewU = np.concatenate((xU, xExpandU), 0)
            x0 = copy.copy(xNew0)
            xL = copy.copy(xNewL)
            xU = copy.copy(xNewU)
            gcNew = np.concatenate((gc, np.ones(2,)), 0)
            gc = copy.copy(gcNew)
    if DesVarNorm in ["None", None, False]:
        x0norm = x0
        xLnorm = xL
        xUnorm = xU
        DefOptSysEq = OptSysEq
    else:
        [x0norm, xLnorm, xUnorm] = normalize(x0, xL, xU, DesVarNorm)
        DefOptSysEq = OptSysEqNorm
    nx = np.size(x0)
    ng = np.size(gc)

#-----------------------------------------------------------------------------------------------------------------------
#       pyOpt optimization
#-----------------------------------------------------------------------------------------------------------------------
    if pyOptAlg is True:
        if SBDO is not False and DesVarNorm  in ["xLxU", True, "xLx0", "x0", "xU"]: #in ["None", None, False]:
            OptProb = pyOpt.Optimization(OptModel, ApproxOptSysEqNorm, obj_set=None)
        elif SBDO is not False and DesVarNorm in ["None", None, False]:
            OptProb = pyOpt.Optimization(OptModel, ApproxOptSysEq, obj_set=None)
        else:
            OptProb = pyOpt.Optimization(OptModel, DefOptSysEq)
        if np.size(x0) == 1:
            OptProb.addVar('x', 'c', value=x0norm, lower=xLnorm, upper=xUnorm)
        elif np.size(x0) > 1:
            for ii in range(np.size(x0)):
                OptProb.addVar('x' + str(ii + 1), 'c', value=x0norm[ii], lower=xLnorm[ii], upper=xUnorm[ii])
        OptProb.addObj('f')
        if np.size(gc) == 1:
            OptProb.addCon('g', 'i')
            ng = 1
        elif np.size(gc) > 1:
            for ii in range(len(gc)):
                OptProb.addCon('g' + str(ii + 1), 'i')
            ng = ii + 1
        if np.size(hc) == 1:
            OptProb.addCon('h', 'i')
        elif np.size(hc) > 1:
            for ii in range(ng):
                OptProb.addCon('h' + str(ii + 1), 'i')
        if AlgOptions == []:
            AlgOptions = OptAlgOptions.setDefault(Alg)
        OptAlg = OptAlgOptions.setUserOptions(AlgOptions, Alg, OptName, OptAlg)
        #if AlgOptions == []:
        #    OptAlg = OptAlgOptions.setDefaultOptions(Alg, OptName, OptAlg)
        #else:
        #    OptAlg = OptAlgOptions.setUserOptions(AlgOptions, Alg, OptName, OptAlg)
        if PrintOut is True:
            print(OptProb)
        if Alg in ["MMA", "FFSQP", "FSQP", "GCMMA", "CONMIN", "SLSQP", "PSQP", "KSOPT", "ALGENCAN", "NLPQLP", "IPOPT"]:
            if SensCalc == "OptSensEq":
                if DesVarNorm  not in ["None", None, False]:
                    [fOpt, xOpt, inform] = OptAlg(OptProb, sens_type=OptSensEqNorm, store_hst=OptName)
                else:
                    [fOpt, xOpt, inform] = OptAlg(OptProb, sens_type=OptSensEq, store_hst=OptName)
            elif SensCalc == "ParaFD":  # Michi Richter
                if DesVarNorm  not in ["None", None, False]:
                    [fOpt, xOpt, inform] = OptAlg(OptProb, sens_type=OptSensEqParaFDNorm, store_hst=OptName)
                else:
                    [fOpt, xOpt, inform] = OptAlg(OptProb, sens_type=OptSensEqParaFD, store_hst=OptName)
            else:  # Here FD (finite differencing)
                [fOpt, xOpt, inform] = OptAlg(OptProb, sens_type=SensCalc, sens_step=deltax, store_hst=OptName)

        elif Alg in ["SDPEN", "SOLVOPT"]:
            [fOpt, xOpt, inform] = OptAlg(OptProb)
        else:
            [fOpt, xOpt, inform] = OptAlg(OptProb, store_hst=OptName)
        if PrintOut is True:
            try: print(OptProb.solution(0))
            except: pass
        if Alg not in ["PSQP", "SOLVOPT", "MIDACO", "SDPEN", "ralg"] and PrintOut is True:
            print(OptAlg.getInform(0))

#-----------------------------------------------------------------------------------------------------------------------
#       OpenOpt optimization -- not fully implemented in this framework and not yet working...
#-----------------------------------------------------------------------------------------------------------------------
    elif Alg == "ralg":
        from openopt import NLP
        f, g = lambda x: OptSysEq(x)
        # g = lambda x: OptSysEq(x)[1][0]
        p = NLP(f, x0, c=g, lb=xL, ub=xU, iprint=50, maxIter=10000, maxFunEvals=1e7, name='NLP_1')
        r = p.solve(Alg, plot=0)
        print(OptAlg.getInform(1))

#-----------------------------------------------------------------------------------------------------------------------
#       pyCMAES
#-----------------------------------------------------------------------------------------------------------------------
    elif Alg == "pycmaes":
        print "CMA-ES == not fully implemented in this framework"
        print "    no constraints"
        import cma
        def CMA_ES_ObjFn(x):
            f, g, fail = OptSysEq(x)
            return f
        OptRes = cma.fmin(CMA_ES_ObjFn, x0, sigma0=1)
        xOpt = OptRes[0]
        fOpt = OptRes[1]
        nEval = OptRes[4]
        nIter = OptRes[5]
#-----------------------------------------------------------------------------------------------------------------------
#       MATLAB fmincon optimization -- not fully implemented in this framework and not yet working...
#-----------------------------------------------------------------------------------------------------------------------
    elif Alg == "fmincon":  # not fully implemented in this framework
        def ObjFn(x):
            f, g, fail = OptSysEqNorm(xNorm)
            return f, []
        from mlabwrap import mlab
        mlab._get(ObjFn)
        mlab.fmincon(mlab._get("ObjFn"), x)      # g,h, dgdx = mlab.fmincon(x.T,cg,ch, nout=3)

#-----------------------------------------------------------------------------------------------------------------------
#       PyGMO optimization
#-----------------------------------------------------------------------------------------------------------------------
    elif Alg[:5] == "PyGMO":
        DesVarNorm = "None"
        #print nindiv
        dim = np.size(x0)
        # prob = OptSysEqPyGMO(dim=dim)
        prob = OptSysEqPyGMO(SysEq=SysEq, xL=xL, xU=xU, gc=gc, dim=dim, OptName=OptName, Alg=Alg, DesOptDir=DesOptDir,
                             DesVarNorm=DesVarNorm, StatusReport=StatusReport, inform=inform, OptTime0=OptTime0)
        # prob = problem.death_penalty(prob_old, problem.death_penalty.method.KURI)
        if AlgOptions == []:
            AlgOptions = OptAlgOptions.setDefault(Alg)
        OptAlg = OptAlgOptions.setUserOptions(AlgOptions, Alg, OptName, OptAlg)
        #algo = eval("PyGMO.algorithm." + Alg[6:]+"()")

        #de (gen=100, f=0.8, cr=0.9, variant=2, ftol=1e-06, xtol=1e-06, screen_output=False)
        #NSGAII (gen=100, cr=0.95, eta_c=10, m=0.01, eta_m=10)
        #sga_gray.__init__(gen=1, cr=0.95, m=0.02, elitism=1, mutation=PyGMO.algorithm._algorithm._gray_mutation_type.UNIFORM, selection=PyGMO.algorithm._algorithm._gray_selection_type.ROULETTE, crossover=PyGMO.algorithm._algorithm._gray_crossover_type.SINGLE_POINT)
        #nsga_II.__init__(gen=100, cr=0.95, eta_c=10, m=0.01, eta_m=10)
        #emoa  (hv_algorithm=None, gen=100, sel_m=2, cr=0.95, eta_c=10, m=0.01, eta_m=10)
        #pade  (gen=10, max_parallelism=1, decomposition=PyGMO.problem._problem._decomposition_method.BI, solver=None, T=8, weights=PyGMO.algorithm._algorithm._weight_generation.LOW_DISCREPANCY, z=[])
        #nspso (gen=100, minW=0.4, maxW=1.0, C1=2.0, C2=2.0, CHI=1.0, v_coeff=0.5, leader_selection_range=5, diversity_mechanism=PyGMO.algorithm._algorithm._diversity_mechanism.CROWDING_DISTANCE)
        #corana: (iter=10000, Ts=10, Tf=0.1, steps=1, bin_size=20, range=1)

        #if Alg[6:] in ["de", "bee_colony", "nsga_II", "pso", "pso_gen", "cmaes", "py_cmaes",
        #               "spea2", "nspso", "pade", "sea", "vega", "sga", "sga_gray", "de_1220",
        #               "mde_pbx", "jde"]:
        #    algo.gen = ngen
        #elif Alg[6:] in ["ihs", "monte_carlo", "sa_corana"]:
        #    algo.iter = ngen
        #elif Alg[6:] == "sms_emoa":
        #    print "sms_emoa not working"
        #else:
        #    sys.exit("improper PyGMO algorithm chosen")
        #algo.f = 1
        #algo.cr=1
        #algo.ftol = 1e-3
        #algo.xtol = 1e-3
        #algo.variant = 2
        #algo.screen_output = False
        #if Alg == "PyGMO_de":
        #    algo = PyGMO.algorithm.de(gen=ngen, f=1, cr=1, variant=2,
        #                              ftol=1e-3, xtol=1e-3, screen_output=False)
        #else:
        #    algo = PyGMO.algorithm.de(gen=ngen, f=1, cr=1, variant=2,
        #                              ftol=1e-3, xtol=1e-3, screen_output=False)
        #pop = PyGMO.population(prob, nIndiv)
        #pop = PyGMO.population(prob, nIndiv, seed=13598)  # Seed fixed for random generation of first individuals
        #algo.evolve(pop)
        isl = PyGMO.island(OptAlg, prob, AlgOptions.nIndiv)
        isl.evolve(1)
        isl.join()
        xOpt = isl.population.champion.x
        # fOpt = isl.population.champion.f[0]
        nEval = isl.population.problem.fevals
        nGen = int(nEval/AlgOptions.nIndiv)  # currently being overwritten and therefore not being used
        StatusReport = False  # turn off status report, so not remade (and destroyed) in following call!
        fOpt, gOpt, fail = OptSysEq(xOpt)  # verification of optimal solution as values above are based on penalty!

#-----------------------------------------------------------------------------------------------------------------------
#        SciPy optimization
#-----------------------------------------------------------------------------------------------------------------------
    elif Alg[:5] == "scipy":
        import scipy.optimize as sciopt
        bounds = [[]]*len(x0)
        for ii in range(len(x0)):
            bounds[ii] = (xL[ii], xU[ii])
        print(bounds)
        if Alg[6:] == "de":
            sciopt.differential_evolution(DefOptSysEq, bounds, strategy='best1bin',
                                          maxiter=None, popsize=15, tol=0.01, mutation=(0.5, 1),
                                          recombination=0.7, seed=None, callback=None, disp=False,
                                          polish=True, init='latinhypercube')

#-----------------------------------------------------------------------------------------------------------------------
#        Simple optimization algorithms to demonstrate use of custom algorithms
#-----------------------------------------------------------------------------------------------------------------------
    #TODO: add history to these
    elif Alg == "SteepestDescentSUMT":
        from CustomAlgs import SteepestDescentSUMT
        fOpt, xOpt, nIter, nEval = SteepestDescentSUMT(DefOptSysEq, x0, xL, xU)
    elif Alg == "NewtonSUMT":
        from CustomAlgs import NewtonSUMT
        fOpt, xOpt, nIter, nEval = NewtonSUMT(DefOptSysEq, x0, xL, xU)

#-----------------------------------------------------------------------------------------------------------------------
#
#-----------------------------------------------------------------------------------------------------------------------
    else:
        sys.exit("Error on line 694 of __init__.py: algorithm misspelled or not supported")

#-----------------------------------------------------------------------------------------------------------------------
#       Optimization post-processing
#-----------------------------------------------------------------------------------------------------------------------
    if StatusReport == 1:
        OptHis2HTML.OptHis2HTML(OptName, OptAlg, DesOptDir, xL, xU, DesVarNorm, list(inform.values())[0], OptTime0)
    OptTime1 = time.time()
    loctime0 = time.localtime(OptTime0)
    hhmmss0 = time.strftime("%H", loctime0)+' : '+time.strftime("%M", loctime0)+' : '+time.strftime("%S", loctime0)
    loctime1 = time.localtime(OptTime1)
    hhmmss1 = time.strftime("%H", loctime1)+' : '+time.strftime("%M", loctime1)+' : '+time.strftime("%S", loctime1)
    diff = OptTime1 - OptTime0
    h0, m0, s0 = (diff // 3600), int((diff / 60) - (diff // 3600) * 60), diff % 60
    OptTime = "%02d" % (h0) + " : " + "%02d" % (m0) + " : " + "%02d" % (s0)

#-----------------------------------------------------------------------------------------------------------------------
#       Read in results from history files
#-----------------------------------------------------------------------------------------------------------------------
    OptHist = pyOpt.History(OptName, "r")
    fAll = OptHist.read([0, -1], ["obj"])[0]["obj"]
    xAll = OptHist.read([0, -1], ["x"])[0]["x"]
    gAll = OptHist.read([0, -1], ["con"])[0]["con"]
    if Alg == "NLPQLP":
        gAll = [x * -1 for x in gAll]
    gGradIter = OptHist.read([0, -1], ["grad_con"])[0]["grad_con"]
    fGradIter = OptHist.read([0, -1], ["grad_obj"])[0]["grad_obj"]
    failIter = OptHist.read([0, -1], ["fail"])[0]["fail"]
    if Alg == "COBYLA" or Alg == "NSGA2" or Alg[:5] == "PyGMO":
        fIter = fAll
        xIter = xAll
        gIter = gAll
    else:
        fIter = [[]] * len(fGradIter)
        xIter = [[]] * len(fGradIter)
        gIter = [[]] * len(fGradIter)
        # SuIter = [[]] * len(fGradIter)
        for ii in range(len(fGradIter)):
            Posdg = OptHist.cues["grad_con"][ii][0]
            Posf = OptHist.cues["obj"][ii][0]
            iii = 0
            while Posdg > Posf:
                iii = iii + 1
                try:
                    Posf = OptHist.cues["obj"][iii][0]
                except:
                    Posf = Posdg + 1
            iii = iii - 1
            fIter[ii] = fAll[iii]
            xIter[ii] = xAll[iii]
            gIter[ii] = gAll[iii]
    OptHist.close()

#-----------------------------------------------------------------------------------------------------------------------
#       Convert all data to numpy arrays
#-----------------------------------------------------------------------------------------------------------------------
    fIter = np.asarray(fIter)
    xIter = np.asarray(xIter)
    gIter = np.asarray(gIter)
    gGradIter = np.asarray(gGradIter)
    fGradIter = np.asarray(fGradIter)

#-----------------------------------------------------------------------------------------------------------------------
# Denormalization of design variables
#-----------------------------------------------------------------------------------------------------------------------
    xOpt = np.resize(xOpt[0:np.size(xL)], np.size(xL))
    if DesVarNorm in ["None", None, False]:
        x0norm = []
        xIterNorm = []
        xOptNorm = []
    else:
        xOpt = np.resize(xOpt, [np.size(xL), ])
        xOptNorm = xOpt
        xOpt = denormalize(xOptNorm.T, xL, xU, DesVarNorm)
        try:
            xIterNorm = xIter[:, 0:np.size(xL)]
            xIter = np.zeros(np.shape(xIterNorm))
            for ii in range(len(xIterNorm)):
                xIter[ii] = denormalize(xIterNorm[ii], xL, xU, DesVarNorm)
        except:
            x0norm = []
            xIterNorm = []
            xOptNorm = []
    nIter = np.size(fIter)
    if np.size(fIter) > 0:
        if fIter[0] != 0:
            fIterNorm = fIter / fIter[0]  # fIterNorm=(fIter-fIter[nEval-1])/(fIter[0]-fIter[nEval-1])
        else:
            fIterNorm = fIter
    else:
        fIterNorm = []

#-----------------------------------------------------------------------------------------------------------------------
#  Active constraints for use in the calculation of the Lagrangian multipliers and optimality criterion
#-----------------------------------------------------------------------------------------------------------------------
    epsActive = 1e-3
    xL_ActiveIndex = (xOpt - xL) / xU < epsActive
    xU_ActiveIndex = (xU - xOpt) / xU < epsActive
    xL_Grad = -np.eye(nx)
    xU_Grad = np.eye(nx)
    xL_GradActive = xL_Grad[:, xL_ActiveIndex]
    xU_GradActive = xU_Grad[:, xU_ActiveIndex]  # or the other way around!
    xGradActive = np.concatenate((xL_GradActive, xU_GradActive), axis=1)
    # TODO change so that 1D optimization works!
    try:
        xL_Active = xL[xL_ActiveIndex]
    except:
        xL_Active = np.array([])
    try:
        xU_Active = xU[xU_ActiveIndex]
    except:
        xU_Active = np.array([])
    if len(xL_Active)==0:
        xActive = xU_Active
    elif len(xU_Active)==0:
        xActive = xL_Active
    else:
        xActive = np.concatenate((xL_Active, xU_Active))
    if np.size(xL) == 1:
        if xL_ActiveIndex == False:
            xL_Active = np.array([])
        else:
            xL_Active = xL
        if xU_ActiveIndex == False:
            xU_Active = np.array([])
        else:
            xU_Active = xU
    else:
        xL_Active = xL[xL_ActiveIndex]
        xU_Active = np.array(xU[xU_ActiveIndex])
    if len(xL_Active)==0:
        xLU_Active = xU_Active
    elif len(xU_Active)==0:
        xLU_Active = xL_Active
    else:
        xLU_Active = np.concatenate((xL_Active, xU_Active))
    #TODO needs to be investigated for PyGMO!
    # are there nonlinear constraints active, in case equality constraints are added later, this must also be added
    if np.size(gc) > 0 and Alg[:5] != "PyGMO":
        gMaxIter = np.zeros([nIter])
        for ii in range(len(gIter)):
            gMaxIter[ii] = max(gIter[ii])
        gOpt = gIter[nIter - 1]
        gOptActiveIndex = gOpt > -epsActive
        gOptActive = gOpt[gOpt > -epsActive]
    elif np.size(gc) == 0:
        gOptActiveIndex = [[False]] * len(gc)
        gOptActive = np.array([])
        gMaxIter = np.array([] * nIter)
        gOpt = np.array([])
    else:
        gMaxIter = np.zeros([nIter])
        for ii in range(len(gIter)):
            gMaxIter[ii] = max(gIter[ii])
        gOptActiveIndex = gOpt > -epsActive
        gOptActive = gOpt[gOpt > -epsActive]
    if len(xLU_Active)==0:
        g_xLU_OptActive = gOptActive
    elif len(gOptActive)==0:
        g_xLU_OptActive = xLU_Active
    else:
        if np.size(xLU_Active) == 1 and np.size(gOptActive) == 1:
            g_xLU_OptActive = np.array([xLU_Active, gOptActive])
        else:
            g_xLU_OptActive = np.concatenate((xLU_Active, gOptActive))
    if np.size(fGradIter) > 0:
        #fGradOpt = fGradIter[nIter - 1]
        fGradOpt = fGradIter[-1]
        if np.size(gc) > 0:
            gGradOpt = gGradIter[nIter - 1]
            gGradOpt = gGradOpt.reshape([ng, nx]).T
            gGradOptActive = gGradOpt[:, gOptActiveIndex == True]
            try:
                cOptActive = gc[gOptActiveIndex == True]
                cActiveType = ["Constraint"]*np.size(cOptActive)
            except:
                cOptActive = []
                cActiveType = []
            if np.size(xGradActive) == 0:
                g_xLU_GradOptActive = gGradOptActive
                c_xLU_OptActive = cOptActive
                c_xLU_ActiveType = cActiveType
            elif np.size(gGradOptActive) == 0:
                g_xLU_GradOptActive = xGradActive
                c_xLU_OptActive = xActive
                c_xLU_ActiveType = ["Bound"]*np.size(xActive)
            else:
                g_xLU_GradOptActive = np.concatenate((gGradOptActive, xGradActive), axis=1)
                c_xLU_OptActive = np.concatenate((cOptActive, xActive))
                xActiveType = ["Bound"]*np.size(xActive)
                c_xLU_ActiveType = np.concatenate((cActiveType, xActiveType))
        else:
            g_xLU_GradOptActive = xGradActive
            gGradOpt = np.array([])
            c_xLU_OptActive = np.array([])
            g_xLU_GradOptActive = np.array([])
            c_xLU_ActiveType = np.array([])

    else:
        fGradOpt = np.array([])
        gGradOpt = np.array([])
        g_xLU_GradOptActive = np.array([])
        c_xLU_OptActive = np.array([])
        c_xLU_ActiveType = np.array([])

#-----------------------------------------------------------------------------------------------------------------------
#   §      Post-processing of optimization solution
#-----------------------------------------------------------------------------------------------------------------------
    lambda_c, SPg, OptRes, Opt1Order, KKTmax = OptPostProc(fGradOpt, gc, gOptActiveIndex, g_xLU_GradOptActive,
                                                           c_xLU_OptActive, c_xLU_ActiveType, DesVarNorm)

#-----------------------------------------------------------------------------------------------------------------------
#   §      Save optimization solution to file
#-----------------------------------------------------------------------------------------------------------------------
    OptSolData = {}
    OptSolData['x0'] = x0
    OptSolData['xOpt'] = xOpt
    OptSolData['xOptNorm'] = xOptNorm
    OptSolData['xIter'] = xIter
    OptSolData['xIterNorm'] = xIterNorm
    OptSolData['fOpt'] = fOpt
    OptSolData['fIter'] = fIter
    OptSolData['fIterNorm'] = fIterNorm
    OptSolData['gIter'] = gIter
    OptSolData['gMaxIter'] = gMaxIter
    OptSolData['gOpt'] = gOpt
    OptSolData['fGradIter'] = fGradIter
    OptSolData['gGradIter'] = gGradIter
    OptSolData['fGradOpt'] = fGradOpt
    OptSolData['gGradOpt'] = gGradOpt
    OptSolData['OptName'] = OptName
    OptSolData['OptModel'] = OptModel
    OptSolData['OptTime'] = OptTime
    OptSolData['loctime'] = loctime
    OptSolData['today'] = today
    OptSolData['computerName'] = computerName
    OptSolData['operatingSystem'] = operatingSystem
    OptSolData['architecture'] = architecture
    OptSolData['nProcessors'] = nProcessors
    OptSolData['userName'] = userName
    OptSolData['Alg'] = Alg
    OptSolData['DesVarNorm'] = DesVarNorm
    OptSolData['KKTmax'] = KKTmax
    OptSolData['lambda_c'] = lambda_c
    OptSolData['nEval'] = nEval
    OptSolData['nIter'] = nIter
    OptSolData['SPg'] = SPg
    OptSolData['gc'] = gc
    #OptSolData['OptAlg'] = OptAlg
    OptSolData['SensCalc'] = SensCalc
    OptSolData['xIterNorm'] = xIterNorm
    OptSolData['x0norm'] = x0norm
    OptSolData['xL'] = xL
    OptSolData['xU'] = xU
    OptSolData['ng'] = ng
    OptSolData['nx'] = nx
    OptSolData['Opt1Order'] = Opt1Order
    OptSolData['hhmmss0'] = hhmmss0
    OptSolData['hhmmss1'] = hhmmss1


#-----------------------------------------------------------------------------------------------------------------------
#   §    Save in Python format
#-----------------------------------------------------------------------------------------------------------------------
    output = open(OptName + "_OptSol.pkl", 'wb')
    pickle.dump(OptSolData, output)
    output.close()
    np.savez(OptName + "_OptSol", x0, xOpt, xOptNorm, xIter, xIterNorm, xIter, xIterNorm, fOpt, fIter, fIterNorm, gIter,
             gMaxIter, gOpt, fGradIter, gGradIter,
             fGradOpt, gGradOpt, OptName, OptModel, OptTime, loctime, today, computerName, operatingSystem,
             architecture, nProcessors, userName, Alg, DesVarNorm, KKTmax)

#-----------------------------------------------------------------------------------------------------------------------
#   §5.2    Save in MATLAB format
#-----------------------------------------------------------------------------------------------------------------------
    #OptSolData['OptAlg'] = []
    spio.savemat(OptName + '_OptSol.mat', OptSolData, oned_as='row')



#-----------------------------------------------------------------------------------------------------------------------
#
#-----------------------------------------------------------------------------------------------------------------------
    os.chdir(MainDir)
    if LocalRun is True and Debug is False:
        try:
            shutil.move(RunDir + os.sep + OptName,
                        ResultsDir + os.sep + OptName + os.sep + "RunFiles" + os.sep)
        # except WindowsError:
        except:
            print "Run files not deleted from " + RunDir + os.sep + OptName
            shutil.copytree(RunDir + os.sep + OptName,
                            ResultsDir + os.sep + OptName + os.sep + "RunFiles" + os.sep)

#-----------------------------------------------------------------------------------------------------------------------
#   §    Graphical post-processing
#-----------------------------------------------------------------------------------------------------------------------
    if ResultReport is True:
        print("Entering preprocessing mode")
        OptResultReport.OptResultReport(OptName, OptAlg, DesOptDir, diagrams=1, tables=1, lyx=1)
        # try: OptResultReport.OptResultReport(OptName, diagrams=1, tables=1, lyx=1)
        # except: print("Problem with generation of Result Report. Check if all prerequisites are installed")
    if Video is True:
        OptVideo.OptVideo(OptName)


#-----------------------------------------------------------------------------------------------------------------------
#   § Print out
#-----------------------------------------------------------------------------------------------------------------------
    if PrintOut is True:
        print("")
        print("--------------------------------------------------------------------------------")
        print("Optimization results - DesOptPy")
        print("--------------------------------------------------------------------------------")
        print("Optimization with " + Alg)
        print("g* = " + str(gOpt))
        print("x* = " + str(xOpt.T))
        print("f* = " + str(fOpt))
        print("Lagrangian multipliers = " + str(lambda_c))
        print("Shadow prices = " + str(SPg))
        try:
            print("nGen = " + str(nGen))
        except:
            print("nIter = " + str(nIter))
        print("nEval = " + str(nEval))
        print("Time of optimization [h:m:s] = " + OptTime)
        if Debug is False:
            print("See results directory: " + ResultsDir + os.sep + OptName + os.sep)
        else:
            print("Local run, no results saved to results directory")
        print("--------------------------------------------------------------------------------")
        if operatingSystem == "Linux" and Alarm is True:
            t = 1
            freq = 350
            os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (t, freq))
    return xOpt, fOpt, SPg
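For reference, the surrogate DoE block above combines the 2^n corner points from fullfact with a Latin hypercube sample of the interior; in isolation that pattern looks like this (sizes are illustrative):

import numpy as np
import pyDOE

n_var, n_lhs = 3, 20
corners = pyDOE.fullfact([2] * n_var)        # 2^n vertices, already in {0, 1}
interior = pyDOE.lhs(n_var, samples=n_lhs)   # LHS points in the unit cube
x_norm = np.concatenate((corners, interior), axis=0)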
Example #18
"""
Created on Tue May 18 20:43:05 2021

@author: denis
"""

from random import randint
from math import floor, log
import pandas as pd
import numpy as np
from pyDOE import fullfact
import seaborn as sns
import matplotlib.pyplot as plt

# Create the full factorial experiment and store it in a matrix
fd = fullfact([3, 3, 3])
fd = fd / 2  # map level indices {0, 1, 2} onto {0.0, 0.5, 1.0}

repeticion = 10
# Use the factorial experiment matrix
FS = []
for f in fd:
    for replica in range(repeticion):
        modelos = pd.read_csv('digits.txt', sep=' ', header=None)
        modelos = modelos.replace({'n': f[0], 'g': f[1], 'b': f[2]})
        r, c = 7, 5
        dim = r * c

        tasa = 0.15
        tranqui = 0.99
        tope = 24