def __init__(self, path, attributes, name='table', verbosity=0):
    """Create (if needed) an SQLite-backed table and start the request processor.

    Args:
        path: directory in which the ``search_progress.db`` file lives.
        attributes: dict mapping column names to attribute-type keys
            understood by ``self.SQLITE_COLUMNS``.
        name: name of the table inside the database.
        verbosity: verbosity level forwarded to the Logger base class.
    """
    # request queues consumed by the request processor
    self.WRITING_REQUESTS = []
    self.READING_REQUESTS = {}
    self.UPDATE_REQUESTS = []
    Logger.__init__(self, 'SQLite interface', verbosity=verbosity)
    self.db_path = 'sqlite:///%s/search_progress.db' % path
    self.attributes = attributes
    self.name = name
    self.log('creating database %s at %s' % (self.name, self.db_path), 'DEBUG')

    # create database
    self.db = sql.create_engine(self.db_path)
    self.db.echo = False
    self.metadata = sql.MetaData(self.db)

    # create table in database; `checkfirst=True` keeps an existing table intact
    self.table = sql.Table(self.name, self.metadata)
    # FIX: loop variable renamed from `name` to `attr_name` so it no longer
    # shadows the `name` parameter (shadowing was harmless here but error-prone)
    for attr_name, attr_type in self.attributes.items():
        self.table.append_column(
            sql.Column(attr_name, self.SQLITE_COLUMNS[attr_type]))
    self.table.create(checkfirst=True)

    # start request processor
    self._process_requests()
def __init__(self, attributes, entries=None, verbosity=0):
    """Set up an in-memory cache keyed by attribute name.

    Args:
        attributes: iterable of attribute names; one cache list is kept per name.
        entries: optional iterable of initial entries, added via ``self.add``.
        verbosity: verbosity level forwarded to the Logger base class.
    """
    Logger.__init__(self, 'DB_Cache', verbosity=verbosity)
    self.attributes = attributes
    self.cache = {attr: [] for attr in self.attributes}
    self.num_items = 0
    # FIX: `entries=[]` was a mutable default argument (shared across calls);
    # a `None` sentinel is backward compatible and side-steps the pitfall
    if entries is None:
        entries = []
    for entry in entries:
        self.add(entry)
def __init__(self, config):
    """Set up the acquisition module: logging, a random sampler, and CPU count."""
    self.config = config
    Logger.__init__(self, 'Acquisition', self.config.get('verbosity'))
    # no local optimizers until they are built on demand
    self.local_optimizers = None
    self.num_cpus = multiprocessing.cpu_count()
    self.total_num_vars = len(self.config.feature_names)
    self.random_sampler = RandomSampler(self.config.general, self.config.parameters)
def __init__(self, config):
    """Prepare observation processing: Chimera scalarization and soft feature bounds."""
    self.config = config
    self.chimera = Chimera(self.config.obj_tolerances, self.config.get('softness'))
    Logger.__init__(self, 'ObservationProcessor', verbosity=self.config.get('verbosity'))

    # soft bounds sit 10% of the feature range inside the hard bounds
    self.feature_lowers = self.config.feature_lowers
    self.feature_uppers = self.config.feature_uppers
    margin = 0.1 * (self.feature_uppers - self.feature_lowers)
    self.soft_lower = self.feature_lowers + margin
    self.soft_upper = self.feature_uppers - margin
def __init__(self, config_general, config_params):
    """Pick the continuous sampler ('sobol' or 'uniform') named in the config.

    Args:
        config_general: general configuration section (provides `verbosity`
            and `sampler`).
        config_params: parameter configuration section, stored for later use.

    Raises:
        PhoenicsUnknownSettingsError: if `sampler` is neither 'sobol' nor 'uniform'.
    """
    self.config_general = config_general
    self.config_params = config_params

    # per-module verbosity override
    # NOTE(review): the `in` test assumes verbosity may be a dict — confirm it
    # is never a plain int here, or this raises TypeError
    verbosity = self.config_general.verbosity
    if 'random_sampler' in self.config_general.verbosity:
        verbosity = self.config_general.verbosity['random_sampler']
    Logger.__init__(self, 'RandomSampler', verbosity)

    sampler = self.config_general.sampler
    if sampler == 'sobol':
        from RandomSampler.sobol import SobolContinuous
        self.continuous_sampler = SobolContinuous()
    elif sampler == 'uniform':
        from RandomSampler.uniform import UniformContinuous
        self.continuous_sampler = UniformContinuous()
    else:
        # BUG FIX: the error was instantiated but never raised, silently
        # leaving `continuous_sampler` undefined; also fixed the message
        # grammar ("did not understanding") to match sibling modules
        raise PhoenicsUnknownSettingsError(
            'did not understand sampler setting: "%s".\n\tChoose from "uniform" or "sobol"' % sampler)
def __init__(self, config):
    """Mark continuous feature positions and build the continuous optimizer.

    Args:
        config: run configuration (provides `verbosity`, `num_features`,
            `feature_types`, and `continuous_optimizer`).

    Raises:
        PhoenicsUnkownSettingsError: if `continuous_optimizer` is not 'adam'.
    """
    self.config = config
    Logger.__init__(self, 'ParamOptimizer', verbosity=self.config.get('verbosity'))

    # parse positions
    # NOTE(review): every feature is marked continuous regardless of
    # `feature_type` — looks like a missing type check; behavior preserved,
    # confirm intent before tightening
    self.pos_continuous = np.full(self.config.num_features, False, dtype=bool)
    for feature_index, feature_type in enumerate(self.config.feature_types):
        self.pos_continuous[feature_index] = True

    # set up continuous optimization algorithms
    cont_opt_name = self.config.get('continuous_optimizer')
    if cont_opt_name == 'adam':
        from Acquisition.NumpyOptimizers import AdamOptimizer
        self.opt_con = AdamOptimizer()
    else:
        # BUG FIX: the error was instantiated but never raised, silently
        # leaving `opt_con` undefined
        # NOTE(review): class name is spelled "Unkown" — verify it matches
        # the actual exception definition
        raise PhoenicsUnkownSettingsError(
            'did not understand continuous optimizer "%s".\n\tPlease choose from "adam"' % cont_opt_name)
def __init__(self, config, model_details=None):
    """Configure the Bayesian network: backend, domain volume, sampling strategies.

    Args:
        config: run configuration (provides `verbosity`, `backend`, `home`,
            feature descriptions, and `sampling_strategies`).
        model_details: optional BNN model details; falls back to the
            package defaults when omitted.

    Raises:
        PhoenicsUnknownSettingsError: if `backend` is neither 'tfprob' nor 'edward'.
    """
    self.COUNTER = 0
    self.has_sampled = False
    self.config = config

    # per-module verbosity override when verbosity is given as a dict
    verbosity = self.config.get('verbosity')
    if 'bayesian_network' in verbosity:
        verbosity = verbosity['bayesian_network']
    Logger.__init__(self, 'BayesianNetwork', verbosity=verbosity)

    # placeholder kernel contribution until the network has sampled
    self.kernel_contribution = lambda x: (np.sum(x), 1.)

    # get bnn model details
    # FIX: identity check instead of `== None`; also, `self.model_details`
    # was previously only assigned in the default case, so a caller-supplied
    # `model_details` was silently dropped
    if model_details is None:
        from BayesianNetwork.model_details import model_details
    self.model_details = model_details

    # set up bnn backend
    if self.config.get('backend') == 'tfprob':
        from BayesianNetwork.TfprobInterface import TfprobNetwork
        self.network_executable = '{}/BayesianNetwork/TfprobInterface/tfprob_interface.py'.format(
            self.config.get('home'))
    elif self.config.get('backend') == 'edward':
        from BayesianNetwork.EdwardInterface import EdwardNetwork
        self.network_executable = '%s/BayesianNetwork/EdwardInterface/edward_interface.py' % self.config.get(
            'home')
    else:
        # BUG FIX: the error was never raised, and the message referenced
        # `self.config_general.backend`, an attribute this class does not
        # have (would have raised AttributeError instead of the intended error)
        raise PhoenicsUnknownSettingsError(
            'did not understand backend: "%s".\n\tChoose from "tfprob" or "edward"' % self.config.get('backend'))

    # get domain volume (unused local `feature_lengths` removed)
    self.volume = 1.
    feature_ranges = self.config.feature_ranges
    for feature_index, feature_type in enumerate(self.config.feature_types):
        self.volume *= feature_ranges[feature_index]
    self.inverse_volume = 1 / self.volume

    # compute sampling parameter values, scaled by the inverse domain volume
    if self.config.get('sampling_strategies') == 1:
        self.sampling_param_values = np.zeros(1)
    else:
        self.sampling_param_values = np.linspace(
            -1.0, 1.0, self.config.get('sampling_strategies'))
        self.sampling_param_values = self.sampling_param_values[::-1]
    self.sampling_param_values *= self.inverse_volume
def __init__(self, config):
    """Store the run configuration and initialize logging for the DB writer."""
    self.config = config
    Logger.__init__(self, 'DB_Writer', self.config.get('verbosity'))
def __init__(self, config_file=None, config_dict=None):
    """Remember the configuration sources (file path and/or dict) for parsing."""
    Logger.__init__(self, 'ConfigParser', verbosity=0)
    # either source may be None; parsing decides which one to use
    self.config_file = config_file
    self.config_dict = config_dict
def __init__(self, config):
    """Store the configuration and record the available CPU count."""
    self.config = config
    Logger.__init__(self, 'SampleSelector', verbosity=self.config.get('verbosity'))
    # parallelism bound for sample selection
    self.num_cpus = multiprocessing.cpu_count()