def __init__(self, objectiveFunctions, subproblems, CPV_lb, CPV_ub,
             CPV_validity_checks, sampleSizes,
             resampling_interruption_confidence = 0.80,
             resampling_interruption_mode = 'reduce_max',
             OFE_assessment_overshoot_function = linearFunction(1.2, 100),
             OFE_assessment_undershoot_function = linearFunction(0.0, 0),
             process_batch = _passfunction,
             saveTo = None,
             saveInterval = -1,
             printFunction = to_stdout,
             printLevel = 2,
             record_X_hist = True,
             normalize_objective_values = True,
             post_iteration_function = _passfunction):
    """ See MOTA help for information on parameters """
    self.T_start = datetime.datetime.now()
    # wrap each tuning objective so its execution time is recorded
    self.objectiveFunctions = [ _timingWrapper(f) for f in objectiveFunctions ]
    assert all( isinstance(sp, MOTA_subproblem) for sp in subproblems )
    assert any( sp.active() for sp in subproblems )
    self.subproblems = subproblems
    # NOTE: at least 4 subproblems recommended; use sp.duplicate(n) to create copies.
    self.n_f = len(objectiveFunctions)
    self.CPV_lb = CPV_lb
    self.CPV_ub = CPV_ub
    self.n_x = len(CPV_lb) + 1  # CPVs plus the OFE-budget dimension
    self.CPV_validity_checks = CPV_validity_checks
    self.sampleSizes = sampleSizes
    # optional parameters
    self.resampling_interruption_confidence = resampling_interruption_confidence
    assert resampling_interruption_mode in ['reduce_max', 'check_all']
    self.resampling_interruption_mode = resampling_interruption_mode
    self.OFE_assessment_overshoot_function = OFE_assessment_overshoot_function
    self.OFE_assessment_undershoot_function = OFE_assessment_undershoot_function
    self.process_batch = _timingWrapper(process_batch)
    self.printFunction = printFunction
    self.printLevel = printLevel
    self.saveTo = saveTo
    # fix: 'is not None' replaces the Python-2-only '<>' operator
    self.saveProgress = saveTo is not None
    self.saveInterval = saveInterval
    self.record_X_hist = record_X_hist
    self.normalize_objective_values = normalize_objective_values
    self.post_iteration_function = post_iteration_function
    # initialization
    self.it = 0
    # objective-value normalization bounds; refined as designs are evaluated
    self.transform_utopia_point = numpy.ones(len(objectiveFunctions)) * numpy.inf
    self.transform_nadir_point = -numpy.ones(len(objectiveFunctions)) * numpy.inf
    if record_X_hist:
        for sp in self.subproblems:
            sp.X_history = []
    self.evaluate_candidate_designs_stats = []
    self.continueOptimisation()
# Example: tune differential evolution (DE) on the N-dimensional Rosenbrock
# problem using tMOPSO, then plot the Pareto-optimal CPVs versus OFE budget.
import DE_tuning_setup
from optTune import tMOPSO, linearFunction
from matplotlib import pyplot

# Run the tuning optimization; tMOPSO's constructor starts the run and
# returns once the tuning (gamma) budget has been exhausted.
tuningOpt = tMOPSO(
    optAlg = DE_tuning_setup.run_DE_on_Ros_ND,
    CPV_lb = DE_tuning_setup.CPV_lb,
    CPV_ub = DE_tuning_setup.CPV_ub,
    CPV_validity_checks = DE_tuning_setup.CPV_validity_checks,
    OFE_budgets = DE_tuning_setup.OFE_budgets_to_tune_under,
    sampleSizes = DE_tuning_setup.sampleSizes,
    resampling_interruption_confidence = 0.6,
    gammaBudget = DE_tuning_setup.tuningBudget,
    OFE_assessment_overshoot_function = linearFunction(2, 100),
    N = 10,
    saveTo = 'tMOPSO_tuning_DE.data')
print(tuningOpt)
#extracting data from the Pareto-optimal front Approximation
OFE_budgets = [d.fv[0] for d in tuningOpt.PFA.designs]       # objective 1: OFE budget
Fmin_values = [d.fv[1] for d in tuningOpt.PFA.designs]       # objective 2: solution error found
log_OFE_budgets = [d.xv[0] for d in tuningOpt.PFA.designs]   # decision var 0: log(OFE budget)
N_values = [int(d.xv[1]) for d in tuningOpt.PFA.designs]     # DE population size
Cr_values = [d.xv[2] for d in tuningOpt.PFA.designs]         # DE crossover probability
F_values = [d.xv[3] for d in tuningOpt.PFA.designs]          # DE scaling factor
# plot Cr and F against OFE budget on the left axis, N on a twinned right axis
line_Cr = pyplot.semilogx(OFE_budgets, Cr_values, 'b^')[0]
line_F = pyplot.semilogx(OFE_budgets, F_values, 'rx')[0]
pyplot.ylabel('Cr, F')
pyplot.twinx()
line_N = pyplot.semilogx(OFE_budgets, N_values, 'go')[0]
def __init__(self, optAlg, CPV_lb, CPV_ub, CPV_validity_checks, sampleSizes,
             gammaBudget,
             OFE_budgets = None,
             OFE_max = None,
             extra_termination_critea = None,
             N = 10,
             w = 0.2,
             c_g = 2.0,
             c_p = 2.0,
             c_beta = 0.1,
             resampling_interruption_confidence = 0.9,
             resampling_interruption_mode = 'reduce_max',
             OFE_assessment_overshoot_function = linearFunction(1.5, 100),
             OFE_assessment_undershoot_function = linearFunction(0, 0),
             constrain_to_initilization_bounds = False,
             saveTo = None,
             saveInterval = 10,
             paretoArchive_local_use_history_info = True,
             printFunction = to_stdout,
             printLevel = 2,
             addtoBatch = _passfunction,
             processBatch = _passfunction,
             post_iteration_function = _passfunction,
             record_V_hist = True,
             record_X_hist = True,
             ):
    """
    Required Args
      * optAlg - function which calls the optimization algorithm or numerical
        method under tuning and returns two lists. The first list should
        contain the utility measure such as the solution error
        (i.e. f_found_opt - f_min) and should be decreasing, and the second
        list the number of objective function evaluations (OFEs) used in
        order to determine each element in the first list and should be
        increasing. The input arguments passed from tMOPSO to optAlg are
        ( numpy.array([CPV_1, CPV_2, ...]), OFE_budgets, randomSeed ). If
        OFE_budgets is a numpy.array then a solution error for each value in
        the list is desired, else if OFE_budgets is an integer, the solution
        errors for every iteration up until OFE_budgets are desired. The type
        of OFE_budgets depends on whether the OFE_budgets constructor
        argument is None: if so an integer, else values from OFE_budgets are
        passed into optAlg.
      * CPV_lb - initialization lower bound for CPV tuning, i.e.
        numpy.array([CPV_1_init_lb, CPV_2_init_lb, ...])
      * CPV_ub - numpy.array([CPV_1_init_ub, CPV_2_init_ub, ...])
      * CPV_validity_checks - function used to check validity of candidate
        CPVs. Usage CPV_validity_checks(CPV_array, OFE_budget) returns tuple
        (valid, msg) where msg is a string of why invalid. Should be a cheap
        function, as candidate CPVs are regenerated if they do not satisfy
        it. Use to prevent negative population sizes, population size larger
        than OFE_budget checks, etcetera.
      * sampleSizes - sample sizes used to generate and refine CPV utility
        values. For example if the sampleSizes are [5,15,30] then all
        candidate CPVs will be sampled 5 times, the possibly non-dominated
        CPVs are then sampled another 15 times, and if still promising
        another 30 times. CPVs making it to the final stage are therefore
        averaged over 50 independent runs.
      * gammaBudget - the number of application layer evaluations (evaluation
        of the function optAlg optimizes) allocated for the tuning.
        NB includes repeats, i.e. assessing optAlg on an OFE budget of 100
        at 5 repeats counts as a gamma of 500.
      * OFE_budgets - numpy.array of OFE budgets for which optAlg is to be
        tuned. If None then the algorithm is tuned under every OFE budget up
        to OFE_max.
      * OFE_max - maximum OFE budget of interest. Need not be specified if
        OFE_budgets is given.

    Optional Args
      * extra_termination_critea - termination criteria in addition to the
        gammaBudget termination criterion. Default None means an empty list;
        a fresh list is created per instance (avoids the shared
        mutable-default pitfall).
      * N - tMOPSO population size
      * w - tMOPSO particle inertia factor
      * c_g - parameter controlling the attraction towards the global guide
      * c_p - parameter controlling the attraction towards the particle's
        personal guide
      * c_beta - particle target OFE budget perturbation factor [0,1],
        influences each particle's velocity in the OFE budget dimension, and
        the local and global guide selection points.
      * resampling_interruption_confidence - re-sampling interruption
        confidence level used by paretoArchive2D
      * resampling_interruption_mode - choices=['reduce_max', 'check_all']
      * OFE_assessment_overshoot_function - when assessing a CPV tuple for an
        OFE budget of beta, this function controls overshoot,
        beta_actual = OFE_assessment_overshoot_function(beta)
      * OFE_assessment_undershoot_function - like
        OFE_assessment_overshoot_function except controls the minimum value
      * constrain_to_initilization_bounds - should CPVs be constrained
        between the initialization bounds CPV_lb and CPV_ub
      * saveTo - file the optimization progress is saved to, at the interval
        specified by saveInterval; use None to disable saving
      * saveInterval - optimization is saved every saveInterval iterations.
        Zero for no saving during the optimization
      * paretoArchive_local_use_history_info - use history info from solution
        error calculations to update particles' local approximations of the
        Pareto front. If True, may speed up tuning by increasing the quality
        of each particle's approximation of the Pareto front, but increases
        the computational overhead of tMOPSO.
      * printLevel - 0 no printing, 1 only warnings, 2 overall info, 3 lots
        of info, 4 verbose (not implemented)
      * addtoBatch - optAlg inputs are passed to this function before optAlg
        is called
      * processBatch - function called after all addtoBatch calls have been
        made, and before any optAlg calls have been made. If used, optAlg
        should retrieve solutions from this function's results
      * post_iteration_function - at the end of each iteration this function
        is called with the tMOPSO instance as the only arg.
    """
    # fix: original default 'extra_termination_critea = []' was a mutable
    # default shared across all instances; normalize the None sentinel
    # before locals() is captured so initializationArgs still records a list.
    if extra_termination_critea is None:
        extra_termination_critea = []
    self.T_start = datetime.datetime.now()
    self.initializationArgs = locals()
    # required parameters
    self.optAlg = _timingWrapper(optAlg)
    # fix: '<>' is Python-2-only; also use identity comparison with None
    assert OFE_budgets is not None or OFE_max is not None
    self.OFE_budgets = OFE_budgets
    # fix: 'OFE_budgets == None' compares elementwise when OFE_budgets is a
    # numpy array (ambiguous truth value); 'is None' is the correct test.
    if OFE_budgets is None:
        self.x_init_lb = numpy.array([0] + list(CPV_lb))
        self.x_init_ub = numpy.array([numpy.log(OFE_max)] + list(CPV_ub))
    else:
        # first decision variable is log(OFE budget)
        self.x_init_lb = numpy.array([numpy.log(min(OFE_budgets))] + list(CPV_lb))
        self.x_init_ub = numpy.array([numpy.log(max(OFE_budgets))] + list(CPV_ub))
    self.CPV_validity_checks = CPV_validity_checks
    self.n_dim = len(self.x_init_lb)
    self.sampleSizes = sampleSizes
    self.gammaBudget = gammaBudget
    # optional parameters
    self.extra_termination_critea = extra_termination_critea
    self.N = N
    self.w = w
    self.c_g = c_g
    self.c_p = c_p
    self.c_beta = c_beta
    self.icl = resampling_interruption_confidence
    self.resampling_interruption_mode = resampling_interruption_mode
    self.OFE_assessment_overshoot_function = OFE_assessment_overshoot_function
    self.OFE_assessment_undershoot_function = OFE_assessment_undershoot_function
    self.constrain_to_initilization_bounds = constrain_to_initilization_bounds
    self.PFA = paretoArchive2D_MWUT()  # global guide
    self.paretoArchive_local_use_history_info = paretoArchive_local_use_history_info
    self.optAlg_addtoBatch = addtoBatch
    self.optAlg_processBatch = _timingWrapper(processBatch)
    self.saveInterval = saveInterval
    self.printFunction = printFunction
    self.printLevel = printLevel
    # fix: 'saveTo <> None' -> 'saveTo is not None'
    if saveTo is not None:
        self.saveProgress = True
        self.saveTo = saveTo
    else:
        self.saveProgress = False
    self.post_iteration_function = post_iteration_function
    self.record_V_hist = record_V_hist
    self.record_X_hist = record_X_hist
    # additional stuff
    self.it = 0
    self.localGuides = [ paretoArchive2D() for i in range(N) ]
    self.log_beta_max = self.x_init_ub[0]
    # max/min OFE budget for the algorithm being tuned
    self.OFE_budget_max = int(numpy.exp(self.log_beta_max))
    self.OFE_budget_min = int(numpy.exp(self.x_init_lb[0]))
    self.optAlg_evals_made = 0
    self.evaluate_candidate_designs_stats = []
    self.PFA_history = PFA_history_recorder()
    self.V_history = []
    self.X_history = []
    self.continueOptimisation()
# NOTE(review): the first three statements below are the tail of a CPV
# validity-check function (its 'def' line lies outside this chunk) --
# presumably 'def CPV_valid(CPVs, OFE_budget):' given its use further down;
# confirm against the full file.
if CPVs[0] < 5:
    # dwell (CPVs[0]) must be at least 5
    return False,'dwell,CPVs[0] < 5'
if CPVs[1] < 0.0001:
    # m (CPVs[1]) must be strictly positive
    return False,'CPVs[1] < 0.0001'
return True,''

# Tune simulated annealing (CPVs: dwell in [10,50], m in [0.0,5.0]) over a
# log-spaced range of OFE budgets; the constructor runs the tuning.
tuningOpt = tMOPSO(
    optAlg = run_simulated_annealing,
    CPV_lb = numpy.array([10, 0.0]),
    CPV_ub = numpy.array([50, 5.0]),
    CPV_validity_checks = CPV_valid,
    OFE_budgets=numpy.logspace(1,3,30).astype(int),
    sampleSizes = [2,8,20], #resampling size of 30
    resampling_interruption_confidence = 0.6,
    gammaBudget = 30*1000*50, #increase to get a smoother result ...
    OFE_assessment_overshoot_function = linearFunction(2, 100 ),
    N = 10,
    )
print(tuningOpt)
# extract the Pareto-optimal front approximation: objective values (fv) and
# the corresponding decision vectors (xv)
Fmin_values = [ d.fv[1] for d in tuningOpt.PFA.designs ]
OFE_budgets = [ d.fv[0] for d in tuningOpt.PFA.designs ]
dwell_values = [ int(d.xv[1]) for d in tuningOpt.PFA.designs ]
m_values = [ d.xv[2] for d in tuningOpt.PFA.designs ]
print('OFE budget Fmin dwell m ')
for a,b,c,d in zip(OFE_budgets, Fmin_values, dwell_values, m_values):
    print(' %i %6.4f %i %4.2f' % (a,b,c,d))
from matplotlib import pyplot
def __init__(self, objectiveFunctions, subproblems, CPV_lb, CPV_ub,
             CPV_validity_checks, sampleSizes,
             DE_F = 2,
             DE_Cr = 0.7,
             OFE_purtibation_factor = 0.2,
             OFE_assessment_overshoot_function = linearFunction(1.2, 100),
             OFE_assessment_undershoot_function = linearFunction(0.0, 0),
             resampling_interruption_confidence = 0.80,
             resampling_interruption_mode = 'reduce_max',
             boundConstrained = False,
             process_batch = _passfunction,
             saveTo = None,
             saveInterval = -1,
             printFunction = to_stdout,
             printLevel = 2,
             record_X_hist = True,
             normalize_objective_values = True,
             post_iteration_function = _passfunction,
             DE_F_vector_mutation = True,
             polynomial_similarity_mode = -1,
             simularity_exploitation_factor = 2,
             simularity_fitting_threshold_ratio = 0.2,
             ):
    """
    Required Args
      * objectiveFunctions - list of tuning objective functions. Each tuning
        objective (f) takes 3 arguments (CPV_array, assessment_OFE_budgets,
        randomSeed). f must return two lists. The first list should contain
        the utility measure such as the solution error
        (i.e. f_found_opt - f_min) and should be decreasing, and the second
        list the number of objective function evaluations (OFEs) used in
        order to determine each element in the first list and should be
        increasing, and should if possible match the assessment_OFE_budgets
        (not required though). These lists can be thought of as the
        optimizer's history. If an objective function has the addtoBatch
        attribute, each (CPV_array, assessment_OFE_budgets, randomSeed)
        about to be evaluated is passed to that function. Then the
        process_batch function is called, after which the objective function
        is called with the same input given to addtoBatch.
      * subproblems - list of MOTA subproblems (MOTA_subproblem instances).
      * CPV_lb - initialization lower bound for CPV tuning, i.e.
        numpy.array([CPV_1_init_lb, CPV_2_init_lb, ...])
      * CPV_ub - numpy.array([CPV_1_init_ub, CPV_2_init_ub, ...])
      * CPV_validity_checks - function used to check validity of candidate
        CPVs. Usage CPV_validity_checks(CPV_array, OFE_budget) returns tuple
        (valid, msg) where msg is a string of why invalid. Should be a cheap
        function, as candidate CPVs are regenerated if they do not satisfy
        it. Use to prevent negative population sizes, population size larger
        than OFE_budget checks, etcetera.
      * sampleSizes - sample sizes used to generate and refine CPV utility
        values. For example if the sampleSizes are [5,15,30] all candidate
        CPVs will be sampled 5 times, the possibly non-dominated CPVs are
        then sampled another 15 times, and if still promising another 30
        times. Surviving CPVs' performances are therefore averaged over 50
        independent instances.

    Optional Args
      * resampling_interruption_confidence - confidence level for
        interrupting the sample gathering process when performing
        re-sampling. Must be greater than 0.5 and less than or equal to 1
      * resampling_interruption_mode - 'reduce_max' or 'check_all'.
      * OFE_purtibation_factor - when generating new candidate CPVs, OFEs
        close to the target OFE budget + 0.25 * rand_gaussian *
        OFE_purtibation_factor * (log(max OFE budget)-log(min OFE budget))
        are considered
      * OFE_assessment_overshoot_function - when assessing a CPV tuple for an
        OFE budget of beta, this function controls overshoot,
        beta_actual = OFE_assessment_overshoot_function(beta)
      * OFE_assessment_undershoot_function - like
        OFE_assessment_overshoot_function except controls the minimum value
      * DE_F - DE scaling factor
      * DE_F_vector_mutation - change 'DE_F*(x_1 - x_2)' to
        'r()*DE_F*(x_1 - x_2)' where r is a vector of randomly generated
        elements between 0 and 1 from a uniform distribution. Recommended,
        else diversity problems at the start of the MOTA optimization.
      * DE_Cr - DE crossover probability
      * boundConstrained - should the CPV tuning search be bound constrained
        between CPV_lb and CPV_ub
      * process_batch - function called after all the add_to_batch functions
        have been called.
      * saveTo - filename used to save optimization progress (one save per
        iteration), use None to disable saving
      * saveInterval - number of iterations after which the progress of the
        tuning should be saved; negative numbers indicate no saving during
        the optimization.
      * printLevel - 0 no printing, 1 only warnings, 2 overall info, 3 lots
        of info, 4 verbose (not implemented)
      * printFunction - output is parsed into this function
      * polynomial_similarity_mode - if an integer >= 1, this determines the
        order of the polynomial fitted for the similarity between polynomial
        fits of the Pareto fronts (PFs) used to construct the neighbourhood
        for generating new candidate designs. A value of 0 indicates
        override_inter_PF_normalization, and use the update neighborhood for
        generating new candidate designs. A value of -1 indicates
        override_inter_PF_normalization, and every subproblem for generating
        new candidate designs. override_inter_PF_normalization - do not
        perform scaling correction based on the fitted polynomials during
        candidate CPV generation when using a CPV from a different PF to the
        target subproblem.
      * simularity_exploitation_factor - controls how much information
        sharing takes place between subproblems as a function of the
        similarity between their PFAs. Example values: 10 share only when
        PFAs are very similar, 0 share equally regardless of similarity,
        -5 share with the most dissimilar PFAs. If a function, then
        simularity_explotation_factor_iteration =
        simularity_exploitation_factor(subproblem.gamma / subproblem.gammaBudget)
      * simularity_fitting_threshold_ratio - sets the maximum scaling range,
        as (CPV_ub - CPV_lb)*simularity_fitting_threshold_ratio
      * normalize_objective_values - normalize objective values so that the
        utopia point is approximately 0 and the nadir point approximately 1
        for all objectives
      * post_iteration_function - at the end of each iteration this function
        is called with the MOTA instance as the only arg.
    """
    self.T_start = datetime.datetime.now()
    # wrap each tuning objective so its execution time is recorded
    self.objectiveFunctions = [ _timingWrapper(f) for f in objectiveFunctions ]
    assert all( isinstance(sp, MOTA_subproblem) for sp in subproblems )
    assert any( sp.active() for sp in subproblems )
    self.subproblems = subproblems
    # NOTE: at least 4 subproblems recommended; use sp.duplicate(n) to create copies.
    self.n_f = len(objectiveFunctions)
    self.CPV_lb = CPV_lb
    self.CPV_ub = CPV_ub
    self.n_x = len(CPV_lb) + 1  # CPVs plus the OFE-budget dimension
    self.CPV_validity_checks = CPV_validity_checks
    self.sampleSizes = sampleSizes
    # optional parameters
    self.resampling_interruption_confidence = resampling_interruption_confidence
    assert resampling_interruption_mode in ['reduce_max', 'check_all']
    self.resampling_interruption_mode = resampling_interruption_mode
    self.simularity_exploitation_factor = simularity_exploitation_factor
    self.simularity_threshold = simularity_fitting_threshold_ratio * (CPV_ub - CPV_lb)
    self.OFE_purtibation_factor = OFE_purtibation_factor
    self.OFE_assessment_overshoot_function = OFE_assessment_overshoot_function
    self.OFE_assessment_undershoot_function = OFE_assessment_undershoot_function
    self.DE_F = DE_F
    self.DE_F_vector_mutation = DE_F_vector_mutation
    self.DE_Cr = DE_Cr
    self.boundConstrained = boundConstrained
    self.process_batch = _timingWrapper(process_batch)
    self.printFunction = printFunction
    self.printLevel = printLevel
    self.saveTo = saveTo
    # fix: 'is not None' replaces the Python-2-only '<>' operator
    self.saveProgress = saveTo is not None
    self.saveInterval = saveInterval
    self.record_X_hist = record_X_hist
    assert polynomial_similarity_mode in [-1, 0, 1, 2, 3, 4]
    self.polynomial_similarity_mode = polynomial_similarity_mode
    if self.polynomial_similarity_mode >= 1:
        for s in self.subproblems:
            s.PFA.poly_fit_order = polynomial_similarity_mode
    self.normalize_objective_values = normalize_objective_values
    self.post_iteration_function = post_iteration_function
    # initialization
    self.it = 0
    # smallest CPV change considered significant
    self.CPV_min_changes = numpy.abs( CPV_ub - CPV_lb ) / 10**6
    # objective-value normalization bounds; refined as designs are evaluated
    self.transform_utopia_point = numpy.ones(len(objectiveFunctions)) * numpy.inf
    self.transform_nadir_point = -numpy.ones(len(objectiveFunctions)) * numpy.inf
    if record_X_hist:
        for sp in self.subproblems:
            sp.X_history = []
    self.evaluate_candidate_designs_stats = []
    self.continueOptimisation()
def __init__(self, objectiveFunctions, subproblems, CPV_lb, CPV_ub,
             CPV_validity_checks, sampleSizes,
             resampling_interruption_confidence=0.80,
             resampling_interruption_mode='reduce_max',
             OFE_assessment_overshoot_function=linearFunction(1.2, 100),
             OFE_assessment_undershoot_function=linearFunction(0.0, 0),
             process_batch=_passfunction,
             saveTo=None,
             saveInterval=-1,
             printFunction=to_stdout,
             printLevel=2,
             record_X_hist=True,
             normalize_objective_values=True,
             post_iteration_function=_passfunction):
    """ See MOTA help for information on parameters """
    self.T_start = datetime.datetime.now()
    # wrap each tuning objective so its execution time is recorded
    self.objectiveFunctions = [_timingWrapper(f) for f in objectiveFunctions]
    assert all(isinstance(sp, MOTA_subproblem) for sp in subproblems)
    assert any(sp.active() for sp in subproblems)
    self.subproblems = subproblems
    # NOTE: at least 4 subproblems recommended; use sp.duplicate(n) to create copies.
    self.n_f = len(objectiveFunctions)
    self.CPV_lb = CPV_lb
    self.CPV_ub = CPV_ub
    self.n_x = len(CPV_lb) + 1  # CPVs plus the OFE-budget dimension
    self.CPV_validity_checks = CPV_validity_checks
    self.sampleSizes = sampleSizes
    # optional parameters
    self.resampling_interruption_confidence = resampling_interruption_confidence
    assert resampling_interruption_mode in ['reduce_max', 'check_all']
    self.resampling_interruption_mode = resampling_interruption_mode
    self.OFE_assessment_overshoot_function = OFE_assessment_overshoot_function
    self.OFE_assessment_undershoot_function = OFE_assessment_undershoot_function
    self.process_batch = _timingWrapper(process_batch)
    self.printFunction = printFunction
    self.printLevel = printLevel
    self.saveTo = saveTo
    # fix: 'is not None' replaces the Python-2-only '<>' operator
    self.saveProgress = saveTo is not None
    self.saveInterval = saveInterval
    self.record_X_hist = record_X_hist
    self.normalize_objective_values = normalize_objective_values
    self.post_iteration_function = post_iteration_function
    # initialization
    self.it = 0
    # objective-value normalization bounds; refined as designs are evaluated
    self.transform_utopia_point = numpy.ones(
        len(objectiveFunctions)) * numpy.inf
    self.transform_nadir_point = -numpy.ones(
        len(objectiveFunctions)) * numpy.inf
    if record_X_hist:
        for sp in self.subproblems:
            sp.X_history = []
    self.evaluate_candidate_designs_stats = []
    self.continueOptimisation()
def __init__(self, objectiveFunctions, subproblems, CPV_lb, CPV_ub,
             CPV_validity_checks, sampleSizes,
             DE_F=2,
             DE_Cr=0.7,
             OFE_purtibation_factor=0.2,
             OFE_assessment_overshoot_function=linearFunction(1.2, 100),
             OFE_assessment_undershoot_function=linearFunction(0.0, 0),
             resampling_interruption_confidence=0.80,
             resampling_interruption_mode='reduce_max',
             boundConstrained=False,
             process_batch=_passfunction,
             saveTo=None,
             saveInterval=-1,
             printFunction=to_stdout,
             printLevel=2,
             record_X_hist=True,
             normalize_objective_values=True,
             post_iteration_function=_passfunction,
             DE_F_vector_mutation=True,
             polynomial_similarity_mode=-1,
             simularity_exploitation_factor=2,
             simularity_fitting_threshold_ratio=0.2,
             ):
    """
    Required Args
      * objectiveFunctions - list of tuning objective functions. Each tuning
        objective (f) takes 3 arguments (CPV_array, assessment_OFE_budgets,
        randomSeed). f must return two lists. The first list should contain
        the utility measure such as the solution error
        (i.e. f_found_opt - f_min) and should be decreasing, and the second
        list the number of objective function evaluations (OFEs) used in
        order to determine each element in the first list and should be
        increasing, and should if possible match the assessment_OFE_budgets
        (not required though). These lists can be thought of as the
        optimizer's history. If an objective function has the addtoBatch
        attribute, each (CPV_array, assessment_OFE_budgets, randomSeed)
        about to be evaluated is passed to that function. Then the
        process_batch function is called, after which the objective function
        is called with the same input given to addtoBatch.
      * subproblems - list of MOTA subproblems (MOTA_subproblem instances).
      * CPV_lb - initialization lower bound for CPV tuning, i.e.
        numpy.array([CPV_1_init_lb, CPV_2_init_lb, ...])
      * CPV_ub - numpy.array([CPV_1_init_ub, CPV_2_init_ub, ...])
      * CPV_validity_checks - function used to check validity of candidate
        CPVs. Usage CPV_validity_checks(CPV_array, OFE_budget) returns tuple
        (valid, msg) where msg is a string of why invalid. Should be a cheap
        function, as candidate CPVs are regenerated if they do not satisfy
        it. Use to prevent negative population sizes, population size larger
        than OFE_budget checks, etcetera.
      * sampleSizes - sample sizes used to generate and refine CPV utility
        values. For example if the sampleSizes are [5,15,30] all candidate
        CPVs will be sampled 5 times, the possibly non-dominated CPVs are
        then sampled another 15 times, and if still promising another 30
        times. Surviving CPVs' performances are therefore averaged over 50
        independent instances.

    Optional Args
      * resampling_interruption_confidence - confidence level for
        interrupting the sample gathering process when performing
        re-sampling. Must be greater than 0.5 and less than or equal to 1
      * resampling_interruption_mode - 'reduce_max' or 'check_all'.
      * OFE_purtibation_factor - when generating new candidate CPVs, OFEs
        close to the target OFE budget + 0.25 * rand_gaussian *
        OFE_purtibation_factor * (log(max OFE budget)-log(min OFE budget))
        are considered
      * OFE_assessment_overshoot_function - when assessing a CPV tuple for an
        OFE budget of beta, this function controls overshoot,
        beta_actual = OFE_assessment_overshoot_function(beta)
      * OFE_assessment_undershoot_function - like
        OFE_assessment_overshoot_function except controls the minimum value
      * DE_F - DE scaling factor
      * DE_F_vector_mutation - change 'DE_F*(x_1 - x_2)' to
        'r()*DE_F*(x_1 - x_2)' where r is a vector of randomly generated
        elements between 0 and 1 from a uniform distribution. Recommended,
        else diversity problems at the start of the MOTA optimization.
      * DE_Cr - DE crossover probability
      * boundConstrained - should the CPV tuning search be bound constrained
        between CPV_lb and CPV_ub
      * process_batch - function called after all the add_to_batch functions
        have been called.
      * saveTo - filename used to save optimization progress (one save per
        iteration), use None to disable saving
      * saveInterval - number of iterations after which the progress of the
        tuning should be saved; negative numbers indicate no saving during
        the optimization.
      * printLevel - 0 no printing, 1 only warnings, 2 overall info, 3 lots
        of info, 4 verbose (not implemented)
      * printFunction - output is parsed into this function
      * polynomial_similarity_mode - if an integer >= 1, this determines the
        order of the polynomial fitted for the similarity between polynomial
        fits of the Pareto fronts (PFs) used to construct the neighbourhood
        for generating new candidate designs. A value of 0 indicates
        override_inter_PF_normalization, and use the update neighborhood for
        generating new candidate designs. A value of -1 indicates
        override_inter_PF_normalization, and every subproblem for generating
        new candidate designs. override_inter_PF_normalization - do not
        perform scaling correction based on the fitted polynomials during
        candidate CPV generation when using a CPV from a different PF to the
        target subproblem.
      * simularity_exploitation_factor - controls how much information
        sharing takes place between subproblems as a function of the
        similarity between their PFAs. Example values: 10 share only when
        PFAs are very similar, 0 share equally regardless of similarity,
        -5 share with the most dissimilar PFAs. If a function, then
        simularity_explotation_factor_iteration =
        simularity_exploitation_factor(subproblem.gamma / subproblem.gammaBudget)
      * simularity_fitting_threshold_ratio - sets the maximum scaling range,
        as (CPV_ub - CPV_lb)*simularity_fitting_threshold_ratio
      * normalize_objective_values - normalize objective values so that the
        utopia point is approximately 0 and the nadir point approximately 1
        for all objectives
      * post_iteration_function - at the end of each iteration this function
        is called with the MOTA instance as the only arg.
    """
    self.T_start = datetime.datetime.now()
    # wrap each tuning objective so its execution time is recorded
    self.objectiveFunctions = [_timingWrapper(f) for f in objectiveFunctions]
    assert all(isinstance(sp, MOTA_subproblem) for sp in subproblems)
    assert any(sp.active() for sp in subproblems)
    self.subproblems = subproblems
    # NOTE: at least 4 subproblems recommended; use sp.duplicate(n) to create copies.
    self.n_f = len(objectiveFunctions)
    self.CPV_lb = CPV_lb
    self.CPV_ub = CPV_ub
    self.n_x = len(CPV_lb) + 1  # CPVs plus the OFE-budget dimension
    self.CPV_validity_checks = CPV_validity_checks
    self.sampleSizes = sampleSizes
    # optional parameters
    self.resampling_interruption_confidence = resampling_interruption_confidence
    assert resampling_interruption_mode in ['reduce_max', 'check_all']
    self.resampling_interruption_mode = resampling_interruption_mode
    self.simularity_exploitation_factor = simularity_exploitation_factor
    self.simularity_threshold = simularity_fitting_threshold_ratio * (
        CPV_ub - CPV_lb)
    self.OFE_purtibation_factor = OFE_purtibation_factor
    self.OFE_assessment_overshoot_function = OFE_assessment_overshoot_function
    self.OFE_assessment_undershoot_function = OFE_assessment_undershoot_function
    self.DE_F = DE_F
    self.DE_F_vector_mutation = DE_F_vector_mutation
    self.DE_Cr = DE_Cr
    self.boundConstrained = boundConstrained
    self.process_batch = _timingWrapper(process_batch)
    self.printFunction = printFunction
    self.printLevel = printLevel
    self.saveTo = saveTo
    # fix: 'is not None' replaces the Python-2-only '<>' operator
    self.saveProgress = saveTo is not None
    self.saveInterval = saveInterval
    self.record_X_hist = record_X_hist
    assert polynomial_similarity_mode in [-1, 0, 1, 2, 3, 4]
    self.polynomial_similarity_mode = polynomial_similarity_mode
    if self.polynomial_similarity_mode >= 1:
        for s in self.subproblems:
            s.PFA.poly_fit_order = polynomial_similarity_mode
    self.normalize_objective_values = normalize_objective_values
    self.post_iteration_function = post_iteration_function
    # initialization
    self.it = 0
    # smallest CPV change considered significant
    self.CPV_min_changes = numpy.abs(CPV_ub - CPV_lb) / 10**6
    # objective-value normalization bounds; refined as designs are evaluated
    self.transform_utopia_point = numpy.ones(
        len(objectiveFunctions)) * numpy.inf
    self.transform_nadir_point = -numpy.ones(
        len(objectiveFunctions)) * numpy.inf
    if record_X_hist:
        for sp in self.subproblems:
            sp.X_history = []
    self.evaluate_candidate_designs_stats = []
    self.continueOptimisation()