def newCandidate_beyes(self):
    """Generate, ML-relax, synchronize and filter a batch of new candidates.

    Each MPI rank mutates ``Neach`` structures, relaxes them with the ML
    model, and predicts energy/uncertainty for each.  All candidates are
    then gathered on every rank with ``sync_atoms``, filtered by predicted
    uncertainty, optionally extended with the (ML-relaxed) population, and
    written to trajectory files on the master rank.

    Returns
    -------
    tuple
        ``(anew_all, anew_mutated_all, E_all, error_all)`` — the relaxed
        candidates, their unrelaxed precursors, and per-candidate predicted
        energies and errors (numpy arrays).

    NOTE(review): this file contains multiple definitions of
    ``newCandidate_beyes``; only the last one bound on the class is used.
    NOTE(review): all ranks must call this together — it contains MPI
    collectives (``comm.barrier``, ``sync_atoms``); do not call it from a
    subset of ranks.
    """
    N_newCandidates = self.Ncand_min

    # Number of new candidates made by each core.
    Neach = int(np.ceil(N_newCandidates / self.comm.size))
    # Use all cores on nodes.
    # NOTE(review): this looks like it was meant to be
    # ``Neach * self.comm.size`` (total across ranks); as written it is
    # Neach * Ncand_min.  Harmless here because only ``Neach`` is used
    # below — confirm before relying on N_newCandidates.
    N_newCandidates = Neach * N_newCandidates

    # perform mutations (timed; progress printed on master only)
    self.comm.barrier()
    if self.master:
        print('mutations starting', flush=True)
    t0 = time()
    anew_mutated_list = self.mutate(Neach)
    self.comm.barrier()
    if self.master:
        print('mutation time:', time() - t0, flush=True)

    # Relax with MLmodel and predict energy + uncertainty per candidate.
    # NOTE(review): ``theta0`` from the *last* loop iteration is reused
    # below; if ``anew_mutated_list`` is empty this raises NameError.
    anew_list = []
    E_list = []
    error_list = []
    for anew_mutated in anew_mutated_list:
        anew = self.relaxML(anew_mutated, with_error=self.errorRelax)
        anew_list.append(anew)
        E, error, theta0 = self.MLmodel.predict_energy(anew,
                                                       return_error=True)
        E_list.append(E)
        error_list.append(error)
    E_list = np.array(E_list)
    error_list = np.array(error_list)
    self.comm.barrier()
    if self.master:
        print('sqrt(theta0):', np.sqrt(np.abs(theta0)), flush=True)

    # Map each candidate's originating operation name to its index in
    # operation_dict so it survives the cross-rank synchronization.
    operation_index = np.array([
        self.operation_dict.index(a.info['key_value_pairs']['origin'])
        for a in anew_list
    ]).astype(int)

    # Syncronize all new candidates on all cores (gather across ranks).
    anew_mutated_all = sync_atoms(world,
                                  atoms_list=anew_mutated_list,
                                  operation_dict=self.operation_dict,
                                  operation_index=operation_index)
    anew_all = sync_atoms(world,
                          atoms_list=anew_list,
                          Epred_list=E_list,
                          error_list=error_list,
                          kappa=self.kappa,
                          operation_dict=self.operation_dict,
                          operation_index=operation_index)
    self.comm.barrier()
    if self.master:
        print('candidates syncronized', flush=True)

    # filter away very uncertain structures: keep candidates whose
    # predicted error is below min_certainty * sqrt(|theta0|).  If the
    # filter would discard everything, relax the threshold (halve the
    # gap to 1) and retry, up to 5 times; after 5 failed attempts no
    # filtering is applied at all.
    error_all_tmp = np.array(
        [a.info['key_value_pairs']['predictedError'] for a in anew_all])
    min_certainty = self.min_certainty
    for _ in range(5):
        filt = error_all_tmp < min_certainty * np.sqrt(np.abs(theta0))
        if np.sum(filt.astype(int)) > 0:
            # Both lists are index-aligned, so one mask filters both.
            anew_mutated_all = [
                anew_mutated_all[i] for i in range(len(anew_all))
                if filt[i]
            ]
            anew_all = [
                anew_all[i] for i in range(len(anew_all)) if filt[i]
            ]
            break
        else:
            min_certainty = min_certainty + (1 - min_certainty) / 2

    # include relaxed population every Nuse_pop_as_candidates'th iteration
    if (self.NsearchIter % self.Nuse_pop_as_candidates) == 0:
        anew_mutated_all = self.population.pop + anew_mutated_all
        anew_all = self.population.pop_MLrelaxed + anew_all
    self.comm.barrier()
    if self.master:
        print('model relaxed candidates done', flush=True)

    # Write candidates to file (master only; parallel=False since only
    # one rank writes).
    if self.master:
        label_relaxed = self.ML_dir + 'ML_relaxed{}'.format(
            self.traj_counter)
        write(label_relaxed + '.traj', anew_all, parallel=False)
        label_unrelaxed = self.ML_dir + 'ML_unrelaxed{}'.format(
            self.traj_counter)
        write(label_unrelaxed + '.traj', anew_mutated_all, parallel=False)

    # Add force-mutated structures to candidates
    #anew_forceMut, anew_preForceMut = self.get_force_mutated_population()
    #anew_all += anew_forceMut
    #anew_mutated_all += anew_preForceMut

    # Extract + print data
    E_all = np.array(
        [a.info['key_value_pairs']['predictedEnergy'] for a in anew_all])
    error_all = np.array(
        [a.info['key_value_pairs']['predictedError'] for a in anew_all])
    fitness_all = np.array(
        [a.info['key_value_pairs']['fitness'] for a in anew_all])
    if self.master:
        print('{}:\n'.format(self.traj_counter),
              np.c_[E_all, error_all, fitness_all])
    return anew_all, anew_mutated_all, E_all, error_all
def newCandidate_beyes(self): N_newCandidates = self.Ncand_min # Number of new candidated made by each core Neach = int(np.ceil(N_newCandidates / self.comm.size)) # Use all cores on nodes. N_newCandidates = Neach * N_newCandidates # perform mutations if self.master: t0 = time() anew_mutated_list = self.mutate(Neach) if self.master: print('mutation time:', time() - t0, flush=True) # Relax with MLmodel anew_list = [] E_list = [] error_list = [] for anew_mutated in anew_mutated_list: anew = self.relaxML(anew_mutated, with_error=True) anew_list.append(anew) E, error, theta0 = self.MLmodel.predict_energy(anew, return_error=True) E_list.append(E) error_list.append(error) E_list = np.array(E_list) error_list = np.array(error_list) if self.master: print('theta0:', theta0, flush=True) operation_index = np.array([ self.operation_dict.index(a.info['key_value_pairs']['origin']) for a in anew_list ]).astype(int) # Syncronize all new candidates on all cores anew_mutated_all = sync_atoms(world, atoms_list=anew_mutated_list, operation_dict=self.operation_dict, operation_index=operation_index) anew_all = sync_atoms(world, atoms_list=anew_list, Epred_list=E_list, error_list=error_list, kappa=self.kappa, operation_dict=self.operation_dict, operation_index=operation_index) error_all_tmp = np.array( [a.info['key_value_pairs']['predictedError'] for a in anew_all]) min_certainty = self.min_certainty for _ in range(5): filt = error_all_tmp < min_certainty * np.sqrt(np.abs(theta0)) if np.sum(filt.astype(int)) > 0: anew_mutated_all = [ anew_mutated_all[i] for i in range(len(anew_all)) if filt[i] ] anew_all = [ anew_all[i] for i in range(len(anew_all)) if filt[i] ] break else: min_certainty = min_certainty + (1 - min_certainty) / 2 # Write candidates to file if self.master: label_relaxed = self.ML_dir + 'ML_relaxed{}'.format( self.traj_counter) write(label_relaxed + '.traj', anew_all, parallel=False) label_unrelaxed = self.ML_dir + 'ML_unrelaxed{}'.format( self.traj_counter) 
write(label_unrelaxed + '.traj', anew_mutated_all, parallel=False) # Extract + print data E_all = np.array( [a.info['key_value_pairs']['predictedEnergy'] for a in anew_all]) error_all = np.array( [a.info['key_value_pairs']['predictedError'] for a in anew_all]) fitness_all = np.array( [a.info['key_value_pairs']['fitness'] for a in anew_all]) if self.master: print('{}:\n'.format(self.traj_counter), np.c_[E_all, error_all, fitness_all]) return anew_all, anew_mutated_all, E_all, error_all
def newCandidate_beyes(self, prior_fitness: bool = False):
    """Generate, ML-relax, synchronize and filter a batch of new candidates.

    Extended variant: can score candidates with the prior model, tracks
    each candidate's minimum distance to the training data, and filters
    out both overly uncertain candidates and candidates too close to
    already-known structures.

    Parameters
    ----------
    prior_fitness : bool, optional
        If True, predict energies/uncertainties with ``self.MLmodel_prior``
        instead of ``self.MLmodel``.

    Returns
    -------
    tuple
        ``(anew_all, anew_mutated_all, E_all, error_all)`` — the relaxed
        candidates, their unrelaxed precursors, and per-candidate predicted
        energies and errors (numpy arrays).

    NOTE(review): this file contains multiple definitions of
    ``newCandidate_beyes``; only the last one bound on the class is used.
    """
    N_newCandidates = self.Ncand_min

    # Number of new candidates made by each core.
    Neach = int(np.ceil(N_newCandidates / self.comm.size))
    # Use all cores on nodes.
    # NOTE(review): likely meant ``Neach * self.comm.size``; the value is
    # unused below, so only ``Neach`` matters — confirm intent.
    N_newCandidates = Neach * N_newCandidates

    # perform mutations (t0 is defined and read on master only)
    if self.master:
        t0 = time()
    anew_mutated_list = self.mutate(Neach)
    if self.master:
        print('mutation time:', time() - t0, flush=True)

    # Relax with MLmodel; predict energy + uncertainty per candidate and
    # record the minimum distance to existing data points.
    # NOTE(review): the loop-local ``theta0`` is assigned but never read —
    # the uncertainty filter below uses ``self.MLmodel_prior.theta0``.
    anew_list = []
    E_list = []
    error_list = []
    dmin_list = []
    for anew_mutated in anew_mutated_list:
        anew = self.relaxML(anew_mutated, with_error=self.errorRelax)
        anew_list.append(anew)
        if prior_fitness:
            E, error, theta0 = self.MLmodel_prior.predict_energy(
                anew, return_error=True)
        else:
            E, error, theta0 = self.MLmodel.predict_energy(
                anew, return_error=True)
        E_list.append(E)
        error_list.append(error)
        dmin = self.get_minDistance2data(anew)
        dmin_list.append(dmin)
    E_list = np.array(E_list)
    error_list = np.array(error_list)
    dmin_list = np.array(dmin_list)
    if self.master:
        if self.use_fine_model:
            print('sqrt(theta0_prior):',
                  np.sqrt(np.abs(self.MLmodel_prior.theta0)),
                  'sqrt(theta0_fine):',
                  np.sqrt(np.abs(self.MLmodel_fine.theta0)),
                  flush=True)
        else:
            print('sqrt(theta0_prior):',
                  np.sqrt(np.abs(self.MLmodel_prior.theta0)),
                  flush=True)

    # Map each candidate's originating operation name to its index in
    # operation_dict so it survives the cross-rank synchronization.
    operation_index = np.array([
        self.operation_dict.index(a.info['key_value_pairs']['origin'])
        for a in anew_list
    ]).astype(int)

    # Syncronize all new candidates on all cores (gather across ranks).
    anew_mutated_all = sync_atoms(world,
                                  atoms_list=anew_mutated_list,
                                  operation_dict=self.operation_dict,
                                  operation_index=operation_index)
    anew_all = sync_atoms(world,
                          atoms_list=anew_list,
                          Epred_list=E_list,
                          error_list=error_list,
                          dmin_list=dmin_list,
                          kappa=self.kappa,
                          operation_dict=self.operation_dict,
                          operation_index=operation_index)

    # Filter out very uncertain structures: keep candidates whose
    # predicted error is below min_certainty * sqrt(|theta0_prior|); if
    # nothing passes, relax the threshold (halve the gap to 1) and retry,
    # up to 5 times.  After 5 failed attempts no filtering is applied.
    theta0_prior = self.MLmodel_prior.theta0
    error_all_tmp = np.array(
        [a.info['key_value_pairs']['predictedError'] for a in anew_all])
    min_certainty = self.min_certainty
    for _ in range(5):
        filt = error_all_tmp < min_certainty * np.sqrt(
            np.abs(theta0_prior))
        if np.sum(filt.astype(int)) > 0:
            # Both lists are index-aligned, so one mask filters both.
            anew_mutated_all = [
                anew_mutated_all[i] for i in range(len(anew_all))
                if filt[i]
            ]
            anew_all = [
                anew_all[i] for i in range(len(anew_all)) if filt[i]
            ]
            break
        else:
            min_certainty = min_certainty + (1 - min_certainty) / 2

    # Filter structures that are too close to known data.
    # NOTE(review): the triple-quoted block below is dead code (an
    # unassigned string expression), kept as a record of an alternative
    # threshold choice — consider deleting it.
    """
    if self.use_fine_model:
        min_distance = 0.02 * self.MLmodel_fine.sigma
    else:
        min_distance = self.min_distance
    """
    dmin_all_tmp = np.array(
        [a.info['key_value_pairs']['dmin'] for a in anew_all])
    distance_filter = dmin_all_tmp > self.min_distance
    anew_mutated_all = [
        anew_mutated_all[i] for i in range(len(anew_all))
        if distance_filter[i]
    ]
    anew_all = [
        anew_all[i] for i in range(len(anew_all)) if distance_filter[i]
    ]
    self.comm.barrier()
    if self.master:
        print('model relaxed candidates done', flush=True)

    # Write candidates to file (master only; parallel=False since only
    # one rank writes).
    if self.master:
        label_relaxed = self.ML_dir + 'ML_relaxed{}'.format(
            self.traj_counter)
        write(label_relaxed + '.traj', anew_all, parallel=False)
        label_unrelaxed = self.ML_dir + 'ML_unrelaxed{}'.format(
            self.traj_counter)
        write(label_unrelaxed + '.traj', anew_mutated_all, parallel=False)

    # Add force-mutated structures to candidates
    #anew_forceMut, anew_preForceMut = self.get_force_mutated_population()
    #anew_all += anew_forceMut
    #anew_mutated_all += anew_preForceMut

    # Extract + print data
    E_all = np.array(
        [a.info['key_value_pairs']['predictedEnergy'] for a in anew_all])
    error_all = np.array(
        [a.info['key_value_pairs']['predictedError'] for a in anew_all])
    fitness_all = np.array(
        [a.info['key_value_pairs']['fitness'] for a in anew_all])
    if self.master:
        print('{}:\n'.format(self.traj_counter),
              np.c_[E_all, error_all, fitness_all])
    return anew_all, anew_mutated_all, E_all, error_all