def _sample_constraint_ls(self, comp, vals):
    def lpSigmoid(ff, gain=self.constraint_gain):
        # Bernoulli log-likelihood of the constraint observations under a
        # logistic (sigmoid) link applied to the latent function ff.
        probs = 1. / (1. + np.exp(-gain * ff))
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        return llh

    def updateGain(gain):
        # Log-probability of the sigmoid gain, restricted to [0.01, 10].
        if gain < 0.01 or gain > 10:
            return -np.inf

        cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                       + 1e-6 * np.eye(comp.shape[0]))
               + self.constraint_noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals)  # - self.constraint_mean)
        #lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(self.ff, solve)
        lp = lpSigmoid(self.ff, gain)
        return lp

    def logprob(ls):
        # Log-probability of the constraint length scales, restricted to
        # [0, constraint_max_ls].
        if np.any(ls < 0) or np.any(ls > self.constraint_max_ls):
            return -np.inf

        cov = (self.constraint_amp2 * (self.cov_func(ls, comp, None)
                                       + 1e-6 * np.eye(comp.shape[0]))
               + self.constraint_noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), self.ff)  # - self.constraint_mean)
        #lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(self.ff, solve)
        lp = lpSigmoid(self.ff)
        return lp

    #hypers = util.slice_sample(np.hstack((self.constraint_ls, self.ff)), logprob, compwise=True)
    hypers = util.slice_sample(self.constraint_ls, logprob, compwise=True)
    self.constraint_ls = hypers

    # Resample the latent constraint function with elliptical slice sampling.
    cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                   + 1e-6 * np.eye(comp.shape[0]))
           + self.constraint_noise * np.eye(comp.shape[0]))
    chol = spla.cholesky(cov, lower=False)
    ff = self.ff
    for jj in range(20):
        (ff, lpell) = self.elliptical_slice(ff, chol, lpSigmoid)
    self.ff = ff

    # Update gain
    hypers = util.slice_sample(np.array([self.constraint_gain]), updateGain,
                               compwise=True)
    self.constraint_gain = hypers[0]
def _sample_constraint_ls(self, comp, vals):
    def lpProbit(ff, gain=self.constraint_gain):
        # Bernoulli log-likelihood of the constraint observations under a
        # probit link applied to the latent function ff.
        probs = sps.norm.cdf(ff * gain)
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        return llh

    def lpSigmoid(ff, gain=self.constraint_gain):
        # Alternative logistic link; kept for reference but unused below.
        probs = 1.0 / (1.0 + np.exp(-gain * ff))
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        return llh

    def updateGain(gain):
        # Log-probability of the probit gain, restricted to [0.01, 10].
        if gain < 0.01 or gain > 10:
            return -np.inf

        cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                       + 1e-6 * np.eye(comp.shape[0]))
               + self.constraint_noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals)
        lp = lpProbit(self.ff, gain)
        return lp

    def logprob(ls):
        # Log-probability of the constraint length scales, restricted to
        # [0, constraint_max_ls].
        if np.any(ls < 0) or np.any(ls > self.constraint_max_ls):
            return -np.inf

        cov = (self.constraint_amp2 * (self.cov_func(ls, comp, None)
                                       + 1e-6 * np.eye(comp.shape[0]))
               + self.constraint_noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), self.ff)
        lp = lpProbit(self.ff)
        return lp

    hypers = util.slice_sample(self.constraint_ls, logprob, compwise=True)
    self.constraint_ls = hypers

    # Resample the latent constraint function with elliptical slice sampling.
    cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                   + 1e-6 * np.eye(comp.shape[0]))
           + self.constraint_noise * np.eye(comp.shape[0]))
    chol = spla.cholesky(cov, lower=False)
    ff = self.ff
    for jj in range(20):
        (ff, lpell) = self.elliptical_slice(ff, chol, lpProbit)
    self.ff = ff

    # Update gain
    hypers = util.slice_sample(np.array([self.constraint_gain]), updateGain,
                               compwise=True)
    self.constraint_gain = hypers[0]
def _sample_noiseless(self, comp, vals):
    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        noise = 1e-3

        # This is pretty hacky, but keeps things sane.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf

        if amp2 < 0:
            return -np.inf

        cov = (amp2 * (self.cov_func(self.ls, comp, None)
                       + 1e-6 * np.eye(comp.shape[0]))
               + noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - mean, solve)

        # Roll in amplitude lognormal prior
        lp -= 0.5 * (np.log(amp2) / self.amp2_scale) ** 2

        return lp

    hypers = util.slice_sample(np.array([self.mean, self.amp2, self.noise]),
                               logprob, compwise=False)
    self.mean = hypers[0]
    self.amp2 = hypers[1]
    self.noise = 1e-3
def sample_from_proposal_measure(starting_point, log_proposal_measure,
                                 number_of_points, chain_length=20):
    '''
    Samples points from a proposal measure.

    Args:
        starting_point: the point at which to start the sampling
        log_proposal_measure: a function that measures, in log scale,
            how suitable a point is to represent Pmin
        number_of_points: the number of samples to draw
        chain_length: the number of slice-sampling steps taken between two
            consecutive returned samples (improves mixing)
    Returns:
        a numpy array containing the desired number of samples
    '''
    representer_points = np.zeros([number_of_points, starting_point.shape[0]])
    # TODO: burn-in?
    for i in range(0, number_of_points):
        # The inner loop ensures better mixing between consecutive samples.
        for c in range(0, chain_length):
            try:
                starting_point = slice_sample(starting_point,
                                              log_proposal_measure)
            except Exception as e:
                starting_point = handle_slice_sampler_exception(
                    e, starting_point, log_proposal_measure)
        representer_points[i] = starting_point
    return representer_points
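# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal example of calling sample_from_proposal_measure with a toy
# log-density. It assumes numpy (np) and the module-level slice_sample /
# handle_slice_sampler_exception helpers referenced above are available;
# the Gaussian log-measure below is hypothetical and stands in for a real
# log Pmin proposal measure.
def _toy_log_measure(x):
    # Unnormalized log-density of a 2-D standard normal.
    return -0.5 * np.dot(x, x)

representer_points = sample_from_proposal_measure(
    starting_point=np.zeros(2),
    log_proposal_measure=_toy_log_measure,
    number_of_points=10)
# representer_points has shape (10, 2): ten approximate samples from the
# proposal measure, each obtained after a short slice-sampling chain.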
def handle_slice_sampler_exception(exception, starting_point, proposal_measure,
                                   opt_compwise=False):
    '''
    Handles slice sampler exceptions. If the slice sampler shrank to zero,
    it is restarted a few times from the same starting point. If that fails,
    or if the exception was of a different kind, the given exception is
    re-raised.

    Args:
        exception: the exception that occurred
        starting_point: the starting point that was used
        proposal_measure: the proposal measure that was used
        opt_compwise: how to set the compwise option
    Returns:
        the output of the slice sampler
    Raises:
        Exception: the first argument
    '''
    if str(exception) == "Slice sampler shrank to zero!":
        log("Slice sampler shrank to zero! Action: trying to restart "
            + str(NUMBER_OF_RESTARTS) + " times with same starting point")
        restarts_left = NUMBER_OF_RESTARTS
        while restarts_left > 0:
            try:
                return slice_sample(starting_point, proposal_measure,
                                    compwise=opt_compwise)
            except Exception as e:
                log("Restart failed. " + str(restarts_left)
                    + " restarts left. Exception was: " + str(e))
                restarts_left = restarts_left - 1
    # If we leave the while loop (or the message did not match), re-raise the
    # exception we got.
    raise exception
def _sample_constraint_noisy(self, comp, vals):
    def lpSigmoid(ff, gain=self.constraint_gain):
        # Bernoulli log-likelihood of the constraint observations under a
        # logistic (sigmoid) link applied to the latent function ff.
        probs = 1. / (1. + np.exp(-gain * ff))
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        return llh

    def logprob(hypers):
        # Joint log-probability of the constraint amplitude and the latent
        # function values.
        #mean = hypers[0]
        amp2 = hypers[0]
        #gain = hypers[2]
        ff = hypers[1:]

        # This is pretty hacky, but keeps things sane.
        #if mean > np.max(vals) or mean < np.min(vals):
        #    return -np.inf

        if amp2 < 0:
            return -np.inf

        noise = self.constraint_noise
        cov = (amp2 * (self.cov_func(self.constraint_ls, comp, None)
                       + 1e-6 * np.eye(comp.shape[0]))
               + noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), ff)  # - mean)
        #lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(ff-mean, solve)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(ff, solve)

        # Roll in noise horseshoe prior.
        #lp += np.log(np.log(1 + (self.constraint_noise_scale/noise)**2))
        #lp -= 0.5*(np.log(noise)/self.constraint_noise_scale)**2

        # Roll in amplitude lognormal prior
        lp -= 0.5 * (np.log(amp2) / self.constraint_amp2_scale) ** 2

        #lp = -np.sum(np.log(np.diag(chol))) - 0.5*np.dot(self.ff, solve)
        lp += lpSigmoid(ff, self.constraint_gain)

        return lp

    hypers = util.slice_sample(np.hstack((np.array([self.constraint_amp2]),
                                          self.ff)),
                               logprob, compwise=False)
    #self.constraint_mean = hypers[0]
    self.constraint_amp2 = hypers[0]
    #self.constraint_gain = hypers[2]
    self.ff = hypers[1:]

    # Resample the latent constraint function with elliptical slice sampling.
    cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                   + 1e-6 * np.eye(comp.shape[0]))
           + self.constraint_noise * np.eye(comp.shape[0]))
    chol = spla.cholesky(cov, lower=False)
    ff = self.ff
    for jj in range(50):
        (ff, lpell) = self.elliptical_slice(ff, chol, lpSigmoid)
    self.ff = ff
def _sample_time_ls(self, comp, vals):
    def logprob(ls):
        if np.any(ls < 0) or np.any(ls > self.time_max_ls):
            return -np.inf

        cov = (self.time_amp2 * (self.cov_func(ls, comp, None)
                                 + 1e-6 * np.eye(comp.shape[0]))
               + self.time_noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - self.time_mean)
        # GP log marginal likelihood (up to constants):
        # -sum(log diag(chol)) - 0.5 * (vals - mean)^T K^{-1} (vals - mean)
        lp = (-np.sum(np.log(np.diag(chol)))
              - 0.5 * np.dot(vals - self.time_mean, solve))
        return lp

    self.time_ls = util.slice_sample(self.time_ls, logprob, compwise=True)
def _sample_constraint_noisy(self, comp, vals):
    def lpProbit(ff, gain=self.constraint_gain):
        # Bernoulli log-likelihood of the constraint observations under a
        # probit link applied to the latent function ff.
        probs = sps.norm.cdf(ff * gain)
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        if np.any(np.isnan(probs)):
            print(probs)
        return llh

    def lpSigmoid(ff, gain=self.constraint_gain):
        # Alternative logistic link; kept for reference but unused below.
        probs = 1.0 / (1.0 + np.exp(-gain * ff))
        probs[probs <= 0] = 1e-12
        probs[probs >= 1] = 1 - 1e-12
        llh = np.sum(vals * np.log(probs) + (1 - vals) * np.log(1 - probs))
        return llh

    def logprob(hypers):
        # Joint log-probability of the constraint amplitude and the latent
        # function values.
        amp2 = hypers[0]
        ff = hypers[1:]

        if amp2 < 0:
            return -np.inf

        noise = self.constraint_noise
        cov = (amp2 * (self.cov_func(self.constraint_ls, comp, None)
                       + 1e-6 * np.eye(comp.shape[0]))
               + noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), ff)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(ff, solve)

        # Roll in amplitude lognormal prior
        lp -= 0.5 * (np.log(amp2) / self.constraint_amp2_scale) ** 2

        lp += lpProbit(ff, self.constraint_gain)

        return lp

    hypers = util.slice_sample(np.hstack((np.array([self.constraint_amp2]),
                                          self.ff)),
                               logprob, compwise=False)
    self.constraint_amp2 = hypers[0]
    self.ff = hypers[1:]

    # Resample the latent constraint function with elliptical slice sampling.
    # Note: as elsewhere, the noise term is added to the covariance, not
    # scaled by the amplitude.
    cov = (self.constraint_amp2 * (self.cov_func(self.constraint_ls, comp, None)
                                   + 1e-6 * np.eye(comp.shape[0]))
           + self.constraint_noise * np.eye(comp.shape[0]))
    chol = spla.cholesky(cov, lower=False)
    ff = self.ff
    for jj in range(50):
        (ff, lpell) = self.elliptical_slice(ff, chol, lpProbit)
    self.ff = ff
def _sample_ls(comp, vals, cov_func, start_point, mean, amp2, noise):
    def logprob(ls):
        if np.any(ls < 0) or np.any(ls > MAX_LS):
            return -np.inf

        cov = (amp2 * (cov_func(ls, comp, None)
                       + 1e-6 * np.eye(comp.shape[0]))
               + noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = (-np.sum(np.log(np.diag(chol)))
              - 0.5 * np.dot(vals - mean, solve))
        return lp

    try:
        return slice_sample(start_point, logprob, compwise=True)
    except Exception as e:
        return handle_slice_sampler_exception(e, start_point, logprob, True)
def _sample_mean_amp_noise(comp, vals, cov_func, start_point, ls):
    default_noise = 1e-3
    # A start point with only two entries means the noise is fixed
    # (the noiseless case).
    noiseless = (start_point.shape[0] == 2)

    def logprob(hypers):
        mean = hypers[0]
        amp2 = hypers[1]
        if not noiseless:
            noise = hypers[2]
        else:
            noise = default_noise

        # This is pretty hacky, but keeps things sane.
        if mean > np.max(vals) or mean < np.min(vals):
            return -np.inf

        if amp2 < 0 or noise < 0:
            return -np.inf

        cov = (amp2 * (cov_func(ls, comp, None)
                       + 1e-6 * np.eye(comp.shape[0]))
               + noise * np.eye(comp.shape[0]))
        chol = spla.cholesky(cov, lower=True)
        solve = spla.cho_solve((chol, True), vals - mean)
        lp = -np.sum(np.log(np.diag(chol))) - 0.5 * np.dot(vals - mean, solve)

        if not noiseless:
            # Roll in noise horseshoe prior.
            lp += np.log(np.log(1 + (NOISE_SCALE / noise) ** 2))

        # Roll in amplitude lognormal prior
        lp -= 0.5 * (np.log(amp2) / AMP2_SCALE) ** 2

        #print "LP: " + str(lp)
        return lp

    try:
        return slice_sample(start_point, logprob, compwise=False)
    except Exception as e:
        return handle_slice_sampler_exception(e, start_point, logprob, False)
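# --- Usage sketch (added for illustration; not part of the original module) ---
# One way the two module-level samplers above might be interleaved,
# Gibbs-style, to draw GP hyperparameters. It assumes numpy (np), the
# module-level slice_sample helper, and the MAX_LS / NOISE_SCALE / AMP2_SCALE
# constants referenced above are available. The ARD squared-exponential
# kernel below is a hypothetical stand-in for whatever cov_func the
# surrounding code supplies, written to match the cov_func(ls, x, None)
# calling convention used above.
def _ard_se_kernel(ls, x1, x2=None):
    # K[i, j] = exp(-0.5 * sum_d ((x1[i, d] - x2[j, d]) / ls[d])**2)
    x2 = x1 if x2 is None else x2
    diff = (x1[:, None, :] - x2[None, :, :]) / ls
    return np.exp(-0.5 * np.sum(diff ** 2, axis=-1))

comp = np.random.rand(20, 3)          # observed inputs (toy data)
vals = np.sin(comp).sum(axis=1)       # observed objective values (toy data)
ls = np.ones(comp.shape[1])           # ARD length scales
mean, amp2, noise = np.mean(vals), np.var(vals) + 1e-4, 1e-3

for _ in range(10):
    # Jointly resample mean, amplitude and noise given the length scales ...
    mean, amp2, noise = _sample_mean_amp_noise(
        comp, vals, _ard_se_kernel, np.array([mean, amp2, noise]), ls)
    # ... then resample the length scales given the other hyperparameters.
    ls = _sample_ls(comp, vals, _ard_se_kernel, ls, mean, amp2, noise)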