def main():
    bench = Benchmark()
    for num_function in range(INITIAL_FUNCTION, LAST_FUNCTION + 1):
        info = bench.get_info(num_function)
        print(f'\nFunction {num_function}: {info}')
        for i in range(INITIAL_EJECUTION, LAST_EJECUTION + 1):
            BKS = info['best']       # best known solution
            D = info['dimension']
            NP = 30                  # population size
            N_Gen = 5000             # number of generations
            A = 0.95                 # loudness
            r = 0.1                  # pulse rate
            alpha = 0.9              # loudness decay coefficient
            gamma = 0.5              # pulse-rate growth coefficient
            fmin = 0                 # minimum frequency
            fmax = 1                 # maximum frequency
            Lower = info['lower']
            Upper = info['upper']
            ObjetiveFunction = bench.get_function(num_function)
            name_ejecution_file = f'function{num_function}_{i}.csv'
            name_logs_file = 'Logs/' + name_ejecution_file
            name_cluster_file = 'Logs/clusters/' + name_ejecution_file
            bats = BatAlgorithm(num_function, i, BKS, D, NP, N_Gen, A, r,
                                alpha, gamma, fmin, fmax, Lower, Upper,
                                ObjetiveFunction)
            bats.move_bats(name_logs_file, name_cluster_file, ORIGINAL_MH, 100)
def CC_exe(Dim, func_num, NIND, Max_iteration, scale_range, groups, method):
    bench = Benchmark()
    function = bench.get_function(func_num)
    name = 'f' + str(func_num)
    print(name, 'Optimization with', method)
    # The next step is the DE optimization
    best_indexes, best_obj_trace = DE.CC(Dim, NIND, Max_iteration, function,
                                         scale_range, groups)
    help.write_obj_trace(name, method, best_obj_trace)
def DECC_CL_exe(Dim, func_num, NIND, Max_iteration, scale_range, groups_One,
                groups_Lasso, Lasso_cost, candidate, method):
    bench = Benchmark()
    function = bench.get_function(func_num)
    EFs = 3000000  # total budget of function evaluations
    name = 'f' + str(func_num)
    print(name, 'Optimization with', method)
    # The next step is the DE optimization
    best_indexes, best_obj_trace_CC, Population, up, down, cost = DE.DECC_CL_CCDE(
        Dim, NIND, Max_iteration, function, scale_range, groups_One, candidate)
def go_lsgo(func_id):
    from cec2013lsgo.cec2013 import Benchmark
    bench = Benchmark()
    bench_info = bench.get_info(func_id)
    dim = bench_info['dimension']
    return (bench.get_function(func_id), None,
            [bench_info['lower']] * dim,
            [bench_info['upper']] * dim)
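# A minimal usage sketch for go_lsgo (illustrative, not from the original
# source): evaluate CEC'13 LSGO f1 once at its lower bound. The second tuple
# element returned by go_lsgo is just a placeholder (None).
import numpy as np

fobj, _, lower, upper = go_lsgo(1)
x = np.array(lower)  # a trivial feasible point
print(fobj(x))       # one function evaluation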
def DECC_CL_exe(Dim, func_num, NIND, scale_range, groups_One, groups_Lasso,
                Lasso_cost, method):
    bench = Benchmark()
    function = bench.get_function(func_num)
    EFs = 3000000  # total budget of function evaluations
    name = 'f' + str(func_num)
    print(name, 'Optimization with', method)
    # First DE phase: the remaining evaluation budget is converted into a
    # number of cycles (budget / (NIND * Dim)).
    best_indexes, best_obj_trace_CC, cost, up, down = DE.DECC_CL_CCDE(
        Dim, NIND, int((EFs - Lasso_cost) / (NIND * Dim)), function,
        scale_range, groups_One)
    central_point = best_indexes[-1]
    # Second DE phase on the Lasso grouping, with whatever budget is left
    best_indexes, best_obj_trace_CL = DE.DECC_CL_DECC_L(
        Dim, NIND, int((EFs - Lasso_cost - cost) / (NIND * Dim)), function,
        up, down, groups_Lasso, central_point)
    help.write_obj_trace(name, method, best_obj_trace_CC + best_obj_trace_CL)
def DECC_D(func_num, groups_num=10, max_number=100):
    bench = Benchmark()
    function = bench.get_function(func_num)
    benchmark_summary = bench.get_info(func_num)
    scale_range = [benchmark_summary['lower'], benchmark_summary['upper']]
    Dim = 1000
    groups = k_s(10, max_number)
    delta = [0] * Dim
    NIND = 10000
    # Estimate a per-variable delta by running one DE step on each seed
    # group, then regroup the variables by sorted delta.
    for i in range(len(groups)):
        delta[i * max_number:(i + 1) * max_number] = DE.OptTool(
            Dim, NIND, 1, function, groups[i], scale_range, -1)
    sort_index = np.argsort(delta).tolist()
    groups = []
    for i in range(groups_num):
        groups.append(sort_index[i * max_number:(i + 1) * max_number])
    return groups
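# DECC_D depends on an external helper k_s that is not shown here. A minimal
# sketch under the assumption that k_s(n, m) simply splits the 1000 indices
# into n contiguous blocks of m variables each (hypothetical reconstruction):
def k_s(n, m):
    return [list(range(i * m, (i + 1) * m)) for i in range(n)]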
def DECC_DG(func_num):
    cost = 2
    bench = Benchmark()
    function = bench.get_function(func_num)
    groups = CCDE(1000)
    intercept = function(np.zeros((1, 1000))[0])
    i = 0
    while i < len(groups) - 1:
        cost += 2
        index1 = np.zeros((1, 1000))[0]
        index1[groups[i][0]] = 1
        delta1 = function(index1) - intercept
        j = i + 1
        while j < len(groups):
            cost += 2
            # No differential => the variables interact, so merge group j
            # into group i and re-check whatever slides into position j.
            if not help.DG_Differential(groups[i][0], groups[j][0], delta1,
                                        function, intercept):
                groups[i].extend(groups.pop(j))
            else:
                j += 1
        i += 1
    return groups, cost
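# help.DG_Differential is external. A sketch of the classic differential-
# grouping interaction test it presumably implements (hypothetical
# reconstruction; the unit perturbation and eps are illustrative). It returns
# True when variables i and j look separable, matching the caller above,
# which merges on a False result.
def DG_Differential(i, j, delta1, function, intercept, eps=1e-3):
    # delta1 = f(e_i) - f(0) is passed in by the caller; intercept (= f(0))
    # is kept for signature compatibility, delta1 already encodes it.
    e_j = np.zeros(1000)
    e_j[j] = 1
    e_ij = np.zeros(1000)
    e_ij[i] = 1
    e_ij[j] = 1
    delta2 = function(e_ij) - function(e_j)  # change along i with x_j moved
    # Equal deltas -> i and j are (approximately) separable.
    return abs(delta1 - delta2) < eps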
checkpoints = (np.array([0.04, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
               * maxFEs).astype(int)  # array of checkpoint FEs

for fid in Functions:
    # print("Processing f" + str(fid))
    filename = ("myresults_" + str(popsize1) + "pop1_" + str(popsize2) +
                "pop2_f" + str(fid) + ".csv")
    outputfile = open(filename, "w+")
    outputfile.write("FEs,Value,Seed,Function\n")
    for Rseed in seeds:
        '''
        Set objective function: the CEC'13 LSGO benchmark presents 15
        minimization problems.
        '''
        bench = Benchmark()
        fobj = bench.get_function(fid)  # objective function
        # info is a dictionary whose keys are lower, upper, dimension,
        # threshold and best.
        info = bench.get_info(fid)
        '''
        Set parameters
        '''
        np.random.seed(Rseed)
        currentFEs = 0
        # list of tuples of the form (checkpoint, decision_vector, value)
        results = []
        '''
        Set components
        '''
        DE1 = shade(fobj, info,
def main(args):
    global SR_MTS, SR_global_MTS
    """
    Main program. It uses Run DE for experiments. F, CR must be float,
    or 'n' as a normal
    """
    description = "SHADEILS"
    parser = argparse.ArgumentParser(description)
    parser.add_argument("-f", default=16, type=int, dest="function",
                        help='function')
    # Argument for problem type: D4, D12 or D19, each with or without noise
    parser.add_argument("-pr", required=True, type=str,
                        choices=["D4", "D12", "D19", "D4N", "D12N", "D19N"],
                        dest="problem", help='problem type')
    parser.add_argument("-v", default=False, dest="verbose",
                        action='store_true', help='verbose mode')
    parser.add_argument("-s", default=1, type=int, dest="seed",
                        choices=range(1, 6), help='seed (1 - 5)')
    parser.add_argument("-r", default=1, type=int, dest="run", help='runs')
    parser.add_argument("-e", required=False, type=int, dest="maxevals",
                        help='maxevals')
    # Argument for index of evals, that is [1, 2, 3] -> [1.0e5, 6e5, 3e6]
    parser.add_argument("-i", default=1, type=int, dest="index",
                        choices=range(1, 4), help="index of evals")
    parser.add_argument("-t", default=0.001, type=float, dest="threshold",
                        help='threshold')
    parser.add_argument("-p", default=100, type=int, dest="popsize",
                        help='population size')
    parser.add_argument("-H", default=3, type=int, dest="shade_h",
                        help='SHADE history size')
    parser.add_argument("-d", default="results", type=str, dest="dir_output",
                        help='directory output')
    # Time stamp for the comparative study (time.clock() was removed in
    # Python 3.8, so time.time() is used instead)
    time0 = time.time()
    # seeds
    seeds = [24, 45689, 97232447, 96793335, 12345679]
    args = parser.parse_args(args)
    fun = args.function
    problem = args.problem
    # Noise problem, False by default
    noise = False

    if problem in ["D4", "D4N"]:
        dim = 1024
    elif problem in ["D12", "D12N"]:
        dim = 3072
    elif problem in ["D19", "D19N"]:
        dim = 4864

    if "N" in problem:
        print("Problem with noise")
        noise = True

    print("EEG Problem: {0}".format(problem))
    print("Dimension: {0}".format(dim))
    print("Seed: {0}".format(args.seed))
    print("Threshold: {0}".format(args.threshold))
    print("Popsize: {0}".format(args.popsize))

    if args.shade_h is None:
        args.shade_h = min(args.popsize, 100)

    print("SHADE_H: {0}".format(args.shade_h))

    if args.maxevals:
        evals = list(map(int, [1.0e5, 6e5, 3e6]))[:args.maxevals]
    else:
        evals = list(map(int, [1.0e5, 6e5, 3e6]))

    evals_index = args.index
    bench = Benchmark()
    maxfuns = bench.get_num_functions()
    funinfo = bench.get_info(fun, dim)

    if not (1 <= fun <= maxfuns and 1 <= args.seed <= 5):
        parser.print_help()
        sys.exit(1)

    name = "SHADEILS"
    fname = (name + "_EEGProblem_" + problem + "_" +
             str(evals[evals_index - 1]) + ".txt")
    output = path.join(args.dir_output, fname)

    if not args.verbose and isfile(output):
        fin = open(output, 'rb')
        lines = fin.readlines()
        fin.close()
        if lines:
            print("Experiment already exists. Check results folder")
            return

    if not args.verbose:
        fid = open(output, 'w')
    else:
        fid = sys.stdout

    # Parameter commons
    # bench.set_algname("shadeils_restart0.1_pos")
    fitness_fun = bench.get_function(fun, dim, noise)
    seed(seeds[args.seed - 1])

    for _ in range(args.run):
        SR_MTS = []
        SR_global_MTS = []
        SHADEILS.ihshadels(fitness_fun, funinfo, dim, evals, evals_index, fid,
                           time0, threshold=args.threshold,
                           popsize=args.popsize, info_de=args.shade_h)
        bench.next_run()

    fid.close()
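# Example invocation for the EEG variant above (the script filename is
# hypothetical; the flags match the argparse definitions in main):
#   python shadeils_eeg.py -pr D4 -s 1 -r 1 -i 3 -p 100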
def main(args):
    global SR_MTS, SR_global_MTS
    """
    Main program. It uses Run DE for experiments. F, CR must be float,
    or 'n' as a normal
    """
    description = "IHDELS"
    parser = argparse.ArgumentParser(description)
    parser.add_argument("-f", required=True, type=int, choices=range(1, 16),
                        dest="function", help='function')
    parser.add_argument("-v", default=False, dest="verbose",
                        action='store_true', help='verbose mode')
    parser.add_argument("-s", default=1, type=int, dest="seed",
                        choices=range(1, 6), help='seed (1 - 5)')
    parser.add_argument("-r", default=5, type=int, dest="run", help='runs')
    parser.add_argument("-e", required=False, type=int, dest="maxevals",
                        help='maxevals')
    parser.add_argument("-t", default=0.01, type=float, dest="threshold",
                        help='threshold')
    parser.add_argument("-p", default=100, type=int, dest="popsize",
                        help='population size')
    parser.add_argument("-H", default=None, type=int, dest="shade_h",
                        help='SHADE history size')
    parser.add_argument("-d", default="results", type=str, dest="dir_output",
                        help='directory output')
    # seeds
    seeds = [23, 45689, 97232447, 96793335, 12345679]
    args = parser.parse_args(args)
    fun = args.function
    dim = 1000

    print("Function: {0}".format(fun))
    print("Seed: {0}".format(args.seed))
    print("Threshold: {0}".format(args.threshold))
    print("Popsize: {0}".format(args.popsize))

    if args.shade_h is None:
        args.shade_h = min(args.popsize, 100)

    print("SHADE_H: {0}".format(args.shade_h))

    if args.maxevals:
        evals = list(map(int, [1.2e5, 6e5, 3e6]))[:args.maxevals]
    else:
        evals = list(map(int, [1.2e5, 6e5, 3e6]))

    print(evals)
    bench = Benchmark()
    maxfuns = bench.get_num_functions()
    funinfo = bench.get_info(fun)

    if not (1 <= fun <= maxfuns and 1 <= args.seed <= 5):
        parser.print_help()
        sys.exit(1)

    name = "SHADEILS"
    fname = name + ("_pop{args.popsize}_H{args.shade_h}_t{args.threshold:.2f}"
                    "_F{args.function}_{args.seed}r{args.run}.txt").format(args=args)
    output = path.join(args.dir_output, fname)

    if not args.verbose and isfile(output):
        fin = open(output, 'rb')
        lines = fin.readlines()
        fin.close()
        if lines:
            print("Experiment already exists. Check results folder")
            return

    if not args.verbose:
        fid = open(output, 'w')
    else:
        fid = sys.stdout

    # Parameter commons
    # bench.set_algname("shadeils_restart0.1_pos")
    fitness_fun = bench.get_function(fun)
    seed(seeds[args.seed - 1])

    for _ in range(args.run):
        SR_MTS = []
        SR_global_MTS = []
        SHADEILS.ihshadels(fitness_fun, funinfo, dim, evals, fid,
                           threshold=args.threshold, popsize=args.popsize,
                           info_de=args.shade_h)
        bench.next_run()

    fid.close()
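# Example invocation (script filename hypothetical; -f is required and must
# be one of the 15 CEC'13 LSGO functions):
#   python shadeils.py -f 1 -s 1 -r 5 -p 100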
def LASSOCC(func_num):
    Dim = 1000
    group_dim = 20
    size = group_dim * 100  # number of samples per Lasso regression
    degree = 3
    bench = Benchmark()
    max_variables_num = group_dim
    function = bench.get_function(func_num)
    benchmark_summary = bench.get_info(func_num)
    scale_range = [benchmark_summary['lower'], benchmark_summary['upper']]
    verify_time = 0
    All_groups = []
    intercept = function(np.zeros((1, 1000))[0])
    one_bias = []
    # Record f(e_i) - f(0) for every variable i (1001 evaluations in total)
    for i in range(1000):
        index = np.zeros((1, 1000))[0]
        index[i] = 1
        one_bias.append(function(index) - intercept)
    verify_time += 1001
    for current_index in range(0, int(Dim / group_dim)):
        Lasso_model, Feature_names = SparseModel.Regression(
            degree, size, Dim, group_dim, current_index, scale_range, function)
        # Grouping
        coef, Feature_names = help.not_zero_feature(
            Lasso_model.coef_, help.feature_names_normalization(Feature_names))
        groups = help.group_DFS(group_dim, Feature_names, max_variables_num)
        # Shift the local indices of this block to global variable indices
        bias = current_index * group_dim
        for g in groups:
            for i in range(len(g)):
                g[i] += bias
        groups = [g for g in groups if g]  # drop empty groups
        # Check the relationship between the new groups and previous groups,
        # merging any pair whose first variables interact.
        temp_groups = []
        i = 0
        while i < len(All_groups):
            merged = False
            j = 0
            while j < len(groups):
                verify_time += 1
                if not help.Differential(All_groups[i][0], groups[j][0],
                                         function, intercept, one_bias):
                    temp_groups.append(All_groups.pop(i) + groups.pop(j))
                    merged = True
                    break
                j += 1
            if not merged:
                i += 1
        temp_groups.extend(All_groups)
        temp_groups.extend(groups)
        All_groups = temp_groups
    # The constant accounts for the evaluations consumed by the Lasso
    # sampling: 50 regressions x 2000 samples each.
    return All_groups, verify_time + 100000
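# A sketch (not from the original sources) of how LASSOCC's output could feed
# the DECC_CL_exe driver defined above; f5, NIND=30 and the method label are
# arbitrary illustrative choices.
if __name__ == '__main__':
    info = Benchmark().get_info(5)
    groups_Lasso, Lasso_cost = LASSOCC(5)
    DECC_CL_exe(1000, 5, 30, [info['lower'], info['upper']],
                CCDE(1000), groups_Lasso, Lasso_cost, 'DECC-CL')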
from cec2013lsgo.cec2013 import Benchmark
import os.path as path

if __name__ == '__main__':
    # func_num = 11
    Dim = 1000
    NIND = 30
    bench = Benchmark()
    EFs = 3000000
    for func_num in range(2, 16):
        test_time = 1
        name = 'f' + str(func_num)
        benchmark_summary = bench.get_info(func_num)
        scale_range = [benchmark_summary['lower'], benchmark_summary['upper']]
        function = bench.get_function(func_num)
        groups_One = CCDE(Dim)
        for iteration in range(1, 2):
            # DE optimization
            best_indexes, best_obj_trace_CC, up, down = DE.DECC_CL_CCDE(
                Dim, NIND, iteration, function, scale_range, groups_One)
            # Width of the retained search interval per dimension, normalized
            # by the full range
            delta = []
            for i in range(len(up)):
                delta.append((up[i] - down[i]) /
                             (scale_range[1] - scale_range[0]))
            up_bound = max(delta)
            down_bound = min(delta)
            mean = sum(delta) / len(delta)