def step(self, theta, E_old, x_obs, mod, params):
    """One generation: propose offspring, score them, then keep the
    fittest pop_size individuals out of parents and offspring combined
    (truncation selection on the concatenated pool)."""
    pop_size = theta.shape[0]
    # (1) Generate offspring via the configured proposal and keep them in bounds.
    candidates = self.proposal(theta, E_old)
    candidates = np.clip(candidates,
                         a_min=self.config_method['clip_min'],
                         a_max=self.config_method['clip_max'])
    # (2) Evaluate offspring fitness/energy.
    E_candidates = calculate_fitness(x_obs, candidates, mod, params,
                                     self.dist, self.config_model,
                                     self.config_method)
    # (3) Select: sort the union of parents and offspring by energy
    # (ascending — lower is better) and truncate back to pop_size.
    pool = np.concatenate((theta, candidates), 0)
    energies = np.concatenate((E_old, E_candidates), 0)
    keep = np.argsort(energies.squeeze())[:pop_size]
    return pool[keep, :], energies[keep, :]
def step(self, theta, E_old, x_obs, mod, params):
    """One generation with elementwise accept/reject selection and
    success-rate-based step-size (sigma) adaptation."""
    # (1) Generate offspring and clip them into the allowed box.
    offspring = self.proposal(theta, E_old)
    offspring = np.clip(offspring,
                        a_min=self.config_method['clip_min'],
                        a_max=self.config_method['clip_max'])
    # (2) Evaluate offspring fitness/energy.
    E_offspring = calculate_fitness(x_obs, offspring, mod, params,
                                    self.dist, self.config_model,
                                    self.config_method)
    # (3) Select: per-individual mask, 1. where the child strictly improves.
    accepted = (E_offspring < E_old) * 1.
    success_rate = np.mean(accepted)
    # 1/5th-success-rule-style adaptation around a 0.2 success rate:
    # multiply sigma by c below the threshold, divide above it.
    # NOTE(review): the direction of the adaptation depends on whether
    # self.c < 1 — confirm against the method's config.
    if success_rate < 0.2:
        self.sigma = self.sigma * self.c
    elif success_rate > 0.2:
        self.sigma = self.sigma / self.c
    # Accepted children replace their parents; rejected slots keep the parent.
    new_theta = accepted * offspring + (1. - accepted) * theta
    new_E = accepted * E_offspring + (1. - accepted) * E_old
    return new_theta, new_E
def run(dir_method, json_method, dir_model, json_model, dir_results,
        dir_solver, json_solver, dir_data=None, file_data=None,
        exp_sign='_exp_'):
    """Run one parameter-fitting experiment driven by JSON config files.

    Loads method/model/solver configs, prepares observed data (loaded from
    `dir_data`/`file_data` if given, otherwise simulated with noise),
    instantiates the optimization method by name from
    `algorithms.population_optimization_algorithms`, runs the generational
    loop with early stopping, and saves all artifacts into `dir_results`.

    Args:
        dir_method, json_method: directory and filename of the method config.
        dir_model, json_model: directory and filename of the model config
            (also the PySCeS model directory).
        dir_results: output directory for all saved .npy/.json/.pkl artifacts.
        dir_solver, json_solver: directory and filename of the LSODA solver
            config.
        dir_data, file_data: optional location of pre-recorded observations;
            when given, no synthetic data is generated.
        exp_sign: prefix used to build the experiment name for output files.
    """
    config_method = Config(os.path.join(dir_method, json_method))
    config_model = Config(os.path.join(dir_model, json_model))
    config_solver = Config(os.path.join(dir_solver, json_solver))
    # Experiment name
    exp_name = exp_sign + config_method.config['method_name'] + '_'
    # Load PySCES model
    mod = pysces.model(config_model.config['mod_name'], dir=dir_model)
    # Solver settings (LSODA tolerances / iteration limits from solver config)
    mod.__settings__["mode_sim_max_iter"] = config_solver.config[
        "mode_sim_max_iter"]
    mod.__settings__['lsoda_atol'] = config_solver.config['lsoda_atol']
    mod.__settings__['lsoda_rtol'] = config_solver.config['lsoda_rtol']
    mod.__settings__['lsoda_mxordn'] = config_solver.config['lsoda_mxordn']
    mod.__settings__['lsoda_mxords'] = config_solver.config['lsoda_mxords']
    mod.__settings__['lsoda_mxstep'] = config_solver.config['lsoda_mxstep']
    # =====REAL DATA PREPARATION=====
    # Remove fixed_species from params. We do it only once.
    params = remove_fixed(mod.parameters, mod.fixed_species,
                          compartment=config_model.config['compartment'])
    if dir_data is not None:
        # Real recorded data: load observations, configure simulation window.
        config_model.config['real_data'] = True
        mod.sim_start = config_model.config['sim_start']
        mod.sim_end = config_model.config['sim_end']
        mod.sim_points = config_model.config['sim_points']
        x_obs = np.load(os.path.join(dir_data, file_data))
    else:
        # Synthetic data: simulate with the model's true parameters + noise,
        # and save the ground truth for later evaluation.
        config_model.config['real_data'] = False
        x_obs, t = generate_data(mod, params,
                                 sim_start=config_model.config['sim_start'],
                                 sim_end=config_model.config['sim_end'],
                                 sim_points=config_model.config['sim_points'],
                                 noise=config_model.config['noise'])
        real_params = read_real_parameters(mod, params)
        real_params_array = dict_to_array(real_params, params)
        np.save(os.path.join(dir_results, exp_name + 'x_obs.npy'), x_obs)
        np.save(os.path.join(dir_results, exp_name + 't.npy'), t)
        np.save(os.path.join(dir_results, exp_name + 'real_params_array.npy'),
                real_params_array)
        json.dump(
            real_params,
            open(os.path.join(dir_results, exp_name + 'real_params.json'),
                 "w"))
        json.dump(
            params,
            open(os.path.join(dir_results, exp_name + 'params.json'), "w"))
        pickle.dump(mod,
                    open(os.path.join(dir_results, exp_name + 'mod.pkl'),
                         "wb"))
    # =======EXPERIMENT=======
    # dump, just in case, configs
    pickle.dump(
        config_method.config,
        open(os.path.join(dir_results, exp_name + 'config_method.pkl'), "wb"))
    pickle.dump(
        config_model.config,
        open(os.path.join(dir_results, exp_name + 'config_model.pkl'), "wb"))
    # Init method
    # -get all classes in the file
    classes = [x for x in dir(EA) if isclass(getattr(EA, x))]
    # -check whether the provided name is available
    assert config_method.config['method_name'] in classes, \
        'Wrong name of the method! Please pick one of the following methods: {}'.format(
            classes)
    # -initialize the appropriate class (dynamic import by method name)
    module = __import__("algorithms.population_optimization_algorithms",
                        fromlist=[config_method.config['method_name']])
    my_class = getattr(module, config_method.config['method_name'])
    method = my_class(config_method.config, config_model.config)
    # Init parameters: uniform population within [low, high], then clipped.
    theta = np.random.uniform(low=config_model.config['low'],
                              high=config_model.config['high'],
                              size=(config_method.config['pop_size'],
                                    len(params)))
    theta = np.clip(theta, a_min=config_method.config['clip_min'],
                    a_max=config_method.config['clip_max'])
    # Calcute their energy
    E = calculate_fitness(x_obs, theta, mod, params, dist=method.dist,
                          config_model=config_model.config,
                          config_method=config_method.config)
    # -=Start experiment=-
    best_E = [np.min(E)]  # running best-so-far energy per generation
    all_E = E
    all_theta = theta
    clock_start = time.time()
    print('START ~~~~~~>')
    g = config_method.config['generations']
    for i in range(g):
        print(f'========> Generation {i+1}/{g}')
        theta, E = method.step(theta, E, x_obs, mod, params)
        if np.min(E) < best_E[-1]:
            best_E.append(np.min(E))
        else:
            best_E.append(best_E[-1])
        all_theta = np.concatenate((all_theta, theta), 0)
        all_E = np.concatenate((all_E, E), 0)
        # SAVING (every generation, so a crash loses at most one iteration)
        np.save(os.path.join(dir_results, exp_name + 'all_theta.npy'),
                all_theta)
        np.save(os.path.join(dir_results, exp_name + 'all_E.npy'), all_E)
        np.save(os.path.join(dir_results, exp_name + 'best_E.npy'),
                np.asarray(best_E))
        # early stopping: no improvement over the last `patience` generations
        if i > config_method.config['patience']:
            if best_E[-config_method.config['patience']] == best_E[-1]:
                break
    print('~~~~~~> END')
    clock_stop = time.time()
    print('Time elapsed: {}'.format(clock_stop - clock_start))
    np.save(os.path.join(dir_results, exp_name + 'time.npy'),
            np.asarray(clock_stop - clock_start))
def run(mod_name='wolf1', sim_start=0.0, sim_end=30., sim_points=30,
        exp_sign='exp_1_', method_name='DE', generations=5, pop_size=500,
        clip_min=0., clip_max=15., a=-100., b=100., scale=1., p=-1., std=0.1,
        gamma=0.75, CR=0.9, best=False, dist_name='truncnorm', low=0.,
        high=100., indices=None, compartment=True, patience=100, noise=0.1,
        dir_model='C:\\Dev\\github\\abcde\\', slash='\\'):
    """Run one parameter-fitting experiment configured via keyword args.

    Loads the PySCeS model, simulates noisy synthetic observations, builds a
    Config from the keyword arguments, instantiates the requested
    optimization method, runs the generational loop with early stopping, and
    saves all artifacts under `dir_model + 'results'`.

    Args:
        mod_name: PySCeS model name to load from `dir_model`.
        sim_start, sim_end, sim_points: simulation time window and resolution.
        exp_sign: prefix for the experiment's output-file names.
        method_name: one of 'DE', 'RevDE', 'ES', 'EDA', 'RevDEknn', 'EDAknn'.
        generations, pop_size, patience: loop length, population size, and
            early-stopping window.
        clip_min, clip_max: box constraints applied to candidate parameters.
        a, b, scale, p, std, gamma, CR, best, dist_name, indices: method
            hyperparameters forwarded into the Config.
        low, high: range of the uniform population initialization.
        compartment: forwarded to remove_fixed().
        noise: noise level for the synthetic observations.
        dir_model, slash: model directory and path separator used to build
            output paths.

    Raises:
        ValueError: if `method_name` is not one of the supported methods.
    """
    # Experiment name
    exp_name = exp_sign + method_name + '_'
    # Load PySCES model
    mod = pysces.model(mod_name, dir=dir_model)
    # Solver settings (hard-coded LSODA tolerances / limits)
    mod.__settings__["mode_sim_max_iter"] = 0
    mod.__settings__['lsoda_atol'] = 1.0e-012
    mod.__settings__['lsoda_rtol'] = 1.0e-007
    mod.__settings__['lsoda_mxordn'] = 12
    mod.__settings__['lsoda_mxords'] = 5
    mod.__settings__['lsoda_mxstep'] = 0
    # =====REAL DATA PREPARATION=====
    # Remove fixed_species from params. Do it only once
    params = remove_fixed(mod.parameters, mod.fixed_species,
                          compartment=compartment)
    # Simulate noisy "observed" data and save the ground truth for evaluation.
    x_obs, t = generate_data(mod, params, sim_start=sim_start,
                             sim_end=sim_end, sim_points=sim_points,
                             noise=noise)
    real_params = read_real_parameters(mod, params)
    real_params_array = dict_to_array(real_params, params)
    np.save(dir_model + 'results' + slash + exp_name + 'x_obs.npy', x_obs)
    np.save(dir_model + 'results' + slash + exp_name + 't.npy', t)
    np.save(dir_model + 'results' + slash + exp_name + 'real_params_array.npy',
            real_params_array)
    json.dump(
        real_params,
        open(dir_model + 'results' + slash + exp_name + 'real_params.json',
             "w"))
    json.dump(
        params,
        open(dir_model + 'results' + slash + exp_name + 'params.json', "w"))
    pickle.dump(
        mod, open(dir_model + 'results' + slash + exp_name + 'mod.pkl', "wb"))
    # =======EXPERIMENT=======
    # config
    conf = Config(method_name=method_name, generations=generations,
                  pop_size=pop_size, clip_min=clip_min, clip_max=clip_max,
                  a=a, b=b, scale=scale, p=p, std=std, gamma=gamma, CR=CR,
                  best=best, dist_name=dist_name, indices=indices,
                  patience=patience)
    pickle.dump(
        conf.config,
        open(dir_model + 'results' + slash + exp_name + 'config.pkl', "wb"))
    # Init method
    if method_name in ['DE']:
        method = DE(conf.config)
    elif method_name in ['RevDE']:
        method = RevDE(conf.config)
    elif method_name in ['ES']:
        method = ES(conf.config)
    elif method_name in ['EDA']:
        method = EDA(conf.config)
    elif method_name in ['RevDEknn']:
        method = RevDEknn(conf.config)
    elif method_name in ['EDAknn']:
        # BUG FIX: this branch previously instantiated RevDEknn (copy-paste
        # error), silently running the wrong algorithm for 'EDAknn'.
        method = EDAknn(conf.config)
    else:
        # BUG FIX: the message previously listed methods ('ABC_DE', 'ABC_MH')
        # that this dispatch does not actually support.
        raise ValueError(
            'Wrong method! Only DE, RevDE, ES, EDA, RevDEknn and EDAknn.')
    # Init parameters: uniform population within [low, high], then clipped.
    theta = np.random.uniform(low=low, high=high,
                              size=(conf.config['pop_size'], len(params)))
    theta = np.clip(theta, a_min=conf.config['clip_min'],
                    a_max=conf.config['clip_max'])
    # Calcute their energy
    E = calculate_fitness(x_obs, theta, mod, params, dist=method.dist,
                          conf=conf.config)
    # Start experiment
    best_E = [np.min(E)]  # running best-so-far energy per generation
    all_E = E
    all_theta = theta
    clock_start = time.time()
    print('START ~~~~~~>')
    g = conf.config['generations']
    for i in range(conf.config['generations']):
        print(f'========> Generation {i+1}/{g}')
        theta, E = method.step(theta, E, x_obs, mod, params)
        if np.min(E) < best_E[-1]:
            best_E.append(np.min(E))
        else:
            best_E.append(best_E[-1])
        all_theta = np.concatenate((all_theta, theta), 0)
        all_E = np.concatenate((all_E, E), 0)
        # SAVING (every generation, so a crash loses at most one iteration)
        np.save(dir_model + 'results' + slash + exp_name + 'all_theta.npy',
                all_theta)
        np.save(dir_model + 'results' + slash + exp_name + 'all_E.npy', all_E)
        np.save(dir_model + 'results' + slash + exp_name + 'best_E.npy',
                np.asarray(best_E))
        # early stopping: no improvement over the last `patience` generations
        if i > patience:
            if best_E[-patience] == best_E[-1]:
                break
    print('~~~~~~> END')
    clock_stop = time.time()
    print('Time elapsed: {}'.format(clock_stop - clock_start))
    np.save(dir_model + 'results' + slash + exp_name + 'time.npy',
            np.asarray(clock_stop - clock_start))