def _parse_config_file(self, file_name):
    """Load the JSON configuration file and hand its sections to the dict parsers.

    Args:
        file_name: path to the JSON configuration file.
    """
    # Parse the raw JSON first; the variable/objective sections are then
    # consumed by the respective parser base classes.
    parser = ParserJSON(file_name=file_name)
    parser.parse()
    self.json_parser = parser
    self.param_dict = parser.param_dict
    VarDictParser.__init__(self, self.param_dict['variables'])
    ObsDictParser.__init__(self, self.param_dict['objectives'])
def __init__(self, var_dicts, observed_params, observed_losses, batch_size,
             backend='pymc3', model_details=None):
    """Set up the Bayesian neural network backend and per-batch lambda values.

    Args:
        var_dicts: variable descriptor dicts forwarded to VarDictParser.
        observed_params: observed parameter values passed to the network.
        observed_losses: observed loss values passed to the network.
        batch_size: number of proposals generated per batch.
        backend: probabilistic backend, 'pymc3' or 'edward'.
        model_details: optional dict of overrides merged into the class
            defaults in MODEL_DETAILS.

    Raises:
        NotImplementedError: if `backend` is not a supported backend.
    """
    VarDictParser.__init__(self, var_dicts)
    self.observed_params = observed_params
    self.observed_losses = observed_losses
    self.batch_size = batch_size
    self.backend = backend

    # BUG FIX: copy the class-level defaults before applying overrides.
    # The original assigned self.MODEL_DETAILS directly and then mutated it,
    # so per-instance overrides leaked into every instance of the class.
    self.model_details = dict(self.MODEL_DETAILS)
    if model_details:
        self.model_details.update(model_details)

    # Volume of the search domain (product of per-variable ranges).
    self.volume = np.prod(self.var_p_ranges)

    # Import the backend lazily so only the selected framework is required.
    if backend == 'pymc3':
        from BayesianNeuralNetwork.pymc3_interface import Pymc3Network
        self.network = Pymc3Network(self.var_dicts, observed_params,
                                    observed_losses, batch_size,
                                    self.model_details)
    elif backend == 'edward':
        from BayesianNeuralNetwork.edward_interface import EdwardNetwork
        self.network = EdwardNetwork(self.var_dicts, observed_params,
                                     observed_losses, batch_size,
                                     self.model_details)
    else:
        raise NotImplementedError()

    # Lambda values spread the acquisition bias across batch samples;
    # a single-sample batch needs no spread.
    if self.batch_size == 1:
        self.lambda_values = [0.]
    else:
        self.lambda_values = np.linspace(-0.25, 0.25, self.batch_size)
        self.lambda_values = self.lambda_values[::-1]
        self.lambda_values *= 1 / self.volume
    self.sqrt2pi = np.sqrt(2 * np.pi)
def __init__(self, var_dicts, obs_dicts, softness=0.01):
    """Collect hard and soft bounds for all variables and set up loss shaping.

    Args:
        var_dicts: variable descriptor dicts forwarded to VarDictParser.
        obs_dicts: objective descriptor dicts forwarded to ObsDictParser.
        softness: softness parameter handed to the hierarchical loss shaper.
    """
    VarDictParser.__init__(self, var_dicts)
    ObsDictParser.__init__(self, obs_dicts)
    self.softness = softness
    self.loss_shaper = HierarchicalLossShaper(self.loss_tolerances, self.softness)

    # Gather per-dimension lower/upper bounds; categorical variables
    # (no 'low' key) span [0, number of options).
    lowers, uppers = [], []
    for index, wrapped in enumerate(self.var_dicts):
        spec = wrapped[self.var_names[index]]
        size = self.var_sizes[index]
        if 'low' in spec:
            lowers.extend([spec['low']] * size)
            uppers.extend([spec['high']] * size)
        else:
            lowers.extend([0.] * size)
            uppers.extend([len(spec['options'])] * size)
    self.all_lower = np.array(lowers)
    self.all_upper = np.array(uppers)

    # Soft bounds sit 10% inside the hard bounds; categorical dimensions
    # get effectively unbounded soft limits instead.
    span = self.all_upper - self.all_lower
    self.soft_lower = self.all_lower + 0.1 * span
    self.soft_upper = self.all_upper - 0.1 * span
    self.soft_lower[self._cats] = -10**6
    self.soft_upper[self._cats] = 10**6
def __init__(self, var_infos, var_dicts):
    """Initialize the sampler with variable info and a local parameter optimizer.

    Args:
        var_infos: mapping of info names to values; each entry is also exposed
            as an attribute on this instance.
        var_dicts: variable descriptor dicts forwarded to VarDictParser.
    """
    VarDictParser.__init__(self, var_dicts)
    self.local_opt = ParameterOptimizer(var_dicts)
    self.var_infos = var_infos
    # Promote every info entry to an instance attribute for direct access.
    for name, info in self.var_infos.items():
        setattr(self, str(name), info)
    self.total_size = np.sum(self.var_sizes)
    self.random_number_generator = RandomNumberGenerator()
def __init__(self, var_dicts):
    """Record the name and size of each variable and the total dimensionality.

    Args:
        var_dicts: list of single-key dicts; the key is the variable name and
            the value holds at least a 'size' entry.
    """
    VarDictParser.__init__(self, var_dicts)
    self.total_size = 0
    self.var_sizes = []
    self.var_names = []
    for var_dict in self.var_dicts:
        # Each dict wraps exactly one variable; its sole key is the name.
        name = next(iter(var_dict))
        self.total_size += var_dict[name]['size']
        self.var_sizes.append(int(var_dict[name]['size']))
        self.var_names.append(name)
def __init__(self, var_dicts, observed_params, observed_losses, batch_size, model_details):
    """Store observations and model configuration, then derive layer shapes.

    Args:
        var_dicts: variable descriptor dicts forwarded to VarDictParser.
        observed_params: observed parameter values.
        observed_losses: observed loss values; their count becomes `num_obs`.
        batch_size: number of proposals per batch.
        model_details: model configuration dict; each entry is also exposed
            as an attribute on this instance.
    """
    VarDictParser.__init__(self, var_dicts)
    self.observed_params = observed_params
    self.observed_losses = observed_losses
    self.num_obs = len(self.observed_losses)
    self.batch_size = batch_size
    self.model_details = model_details
    # Promote model configuration entries to instance attributes.
    for option, setting in self.model_details.items():
        setattr(self, str(option), setting)
    self._get_weight_and_bias_shapes()