# NOTE: this method lives inside a larger class; the snippet assumes the
# following imports from the enclosing module (names as used below):
#     import montetheano
#     from montetheano import distributions as mt_dist
#     from theano import tensor
def init_kernels(self):
    self.kernels = []
    self.is_refinable = {}
    self.bounds = {}
    self.params = []
    self.param_bounds = []
    self.idxs_mulsets = {}
    self.post_refinement_cleanup = {}

    for iv in self.s_prior:
        dist_name = montetheano.rstreams.rv_dist_name(iv.vals)
        if dist_name == 'normal':
            k = SquaredExponentialKernel()
            self.is_refinable[k] = get_refinability(iv, dist_name)
            self.bounds[k] = (None, None)
        elif dist_name == 'uniform':
            k = SquaredExponentialKernel()
            self.is_refinable[k] = get_refinability(iv, dist_name)
            if self.is_refinable[k]:
                low = tensor.get_constant_value(
                    mt_dist.uniform_get_low(iv.vals))
                high = tensor.get_constant_value(
                    mt_dist.uniform_get_high(iv.vals))
                self.bounds[k] = (low, high)
        elif dist_name == 'lognormal':
            k = LogSquaredExponentialKernel()
            self.is_refinable[k] = get_refinability(iv, dist_name)
            self.bounds[k] = (1e-8, None)
        elif dist_name == 'quantized_lognormal':
            k = LogSquaredExponentialKernel()
            self.is_refinable[k] = get_refinability(iv, dist_name)
            # Bug fix: the original tested `if self.is_refinable:`, which is
            # always true for a non-empty dict; the per-kernel entry is meant.
            if self.is_refinable[k]:
                lbound = tensor.get_constant_value(
                    mt_dist.quantized_lognormal_get_round(iv.vals))
                self.bounds[k] = (lbound, None)
            ff = picklable_instancemethod(self, 'qln_cleanup')
            self.post_refinement_cleanup[k] = ff
        elif dist_name == 'categorical':
            # XXX: a better CategoryKernel would have different
            #      similarities for different choices
            k = CategoryKernel()
            self.is_refinable[k] = False
            # refinable is False, so no bounds are set
        else:
            raise TypeError('unsupported distribution', dist_name)

        self.kernels.append(k)
        self.params.extend(k.params())
        self.param_bounds.extend(k.param_bounds())
        # XXX: to be more robust, it would be nice to build an Env with
        #      the idxs as outputs, and then run the MergeOptimizer on it.
        self.idxs_mulsets.setdefault(iv.idxs, []).append(k)
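# `picklable_instancemethod` is referenced above but not defined in this
# excerpt. Below is a minimal sketch of what such a helper could look like;
# it is an assumption about the intent, not the original implementation.
# The idea: a bound method cannot be pickled directly (on Python 2), so the
# wrapper stores the object and the method *name*, and re-binds the method
# at call time so only picklable pieces are ever serialized.
class picklable_instancemethod(object):
    """Picklable stand-in for a bound method (sketch, assumed behavior)."""

    def __init__(self, obj, name):
        self.obj = obj
        self.name = name

    def __call__(self, *args, **kwargs):
        # Look the method up again at call time; pickling only has to
        # handle (obj, name).
        return getattr(self.obj, self.name)(*args, **kwargs)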
def get_refinability(v, dist_name):
    v = v.vals
    if dist_name == 'uniform':
        params = [mt_dist.uniform_get_low(v),
                  mt_dist.uniform_get_high(v)]
    elif dist_name == 'normal':
        params = [mt_dist.normal_get_mu(v),
                  mt_dist.normal_get_sigma(v)]
    elif dist_name == 'lognormal':
        params = [mt_dist.lognormal_get_mu(v),
                  mt_dist.lognormal_get_sigma(v)]
    elif dist_name == 'quantized_lognormal':
        params = [mt_dist.quantized_lognormal_get_mu(v),
                  mt_dist.quantized_lognormal_get_sigma(v),
                  mt_dist.quantized_lognormal_get_round(v)]
    else:
        # Defensive fix: the original fell through here with `params`
        # unbound, so an unrecognized name raised a confusing NameError.
        raise TypeError('unsupported distribution', dist_name)
    # A distribution is refinable iff every parameter is a compile-time
    # constant; get_constant_value raises TypeError for symbolic inputs.
    for p in params:
        try:
            tensor.get_constant_value(p)
        except TypeError:
            return False
    return True
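# A minimal sketch of why the try/except above decides refinability. It
# assumes the old Theano API used throughout this module, in which
# tensor.get_constant_value() extracts the value of a constant expression
# and raises TypeError for a symbolic one.
if __name__ == '__main__':
    from theano import tensor

    const = tensor.as_tensor_variable(0.5)
    print(tensor.get_constant_value(const))  # a constant -> its value

    mu = tensor.dscalar('mu')  # symbolic: value unknown until runtime
    try:
        tensor.get_constant_value(mu)
    except TypeError:
        print('symbolic parameter -> not refinable')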