def get_sample(self, number_samples, input_values=None):
    """Draw forward (prior) samples from the model and return them as a pandas frame.

    Parameters
    ----------
    number_samples : int
        Number of samples to draw.
    input_values : dict or pandas frame, optional
        Values to condition the sampler on; defaults to no conditioning.

    Returns
    -------
    Samples reformatted into a pandas structure by ``reformat_sample_to_pandas``.
    """
    # TODO: code duplication here (see the other get_sample/get_posterior_sample variants)
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=number_samples)
    raw_sample = self._get_sample(number_samples,
                                  observed=False,
                                  input_values=reformatted_input_values,
                                  differentiable=False)
    sample = reformat_sample_to_pandas(raw_sample)
    return sample
def get_posterior_sample(self, number_samples, input_values=None):
    """Draw samples from the posterior model and return them as a pandas frame.

    Parameters
    ----------
    number_samples : int
        Number of samples to draw.
    input_values : dict or pandas frame, optional
        Values to condition the sampler on; defaults to no conditioning.

    Returns
    -------
    Samples reformatted into a pandas structure by ``reformat_sample_to_pandas``.
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=number_samples)
    raw_sample = self._get_posterior_sample(
        number_samples, input_values=reformatted_input_values)
    sample = reformat_sample_to_pandas(raw_sample)
    return sample
def get_sample(self, number_samples, input_values=None):
    """Draw samples of this variable only and return them as a pandas frame.

    The raw sample dict is restricted to this variable's own entry before
    reformatting, and internal sampler state is cleared afterwards via
    ``self.reset()``.

    Parameters
    ----------
    number_samples : int
        Number of samples to draw.
    input_values : dict or pandas frame, optional
        Values to condition the sampler on; defaults to no conditioning.

    Returns
    -------
    Samples reformatted into a pandas structure by ``reformat_sample_to_pandas``.
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=number_samples)
    # Keep only this variable's samples out of the full raw-sample mapping.
    raw_sample = {self: self._get_sample(number_samples,
                                         resample=False,
                                         observed=self.is_observed,
                                         differentiable=False,
                                         input_values=reformatted_input_values)[self]}
    sample = reformat_sample_to_pandas(raw_sample)
    # Clear cached sampler state so subsequent calls start fresh.
    self.reset()
    return sample
def get_entropy(self, input_values=None):
    """Compute the entropy and return it as a pandas frame.

    Parameters
    ----------
    input_values : dict or pandas frame, optional
        Values to condition on; defaults to no conditioning.

    Returns
    -------
    Entropy reformatted into a pandas structure (single-sample layout).
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=1)
    raw_ent = self._get_entropy(reformatted_input_values)
    ent = reformat_sample_to_pandas(raw_ent, number_samples=1)
    return ent
def get_variance(self, input_values=None):
    """Compute the variance and return it as a pandas frame.

    Parameters
    ----------
    input_values : dict or pandas frame, optional
        Values to condition on; defaults to no conditioning.

    Returns
    -------
    Variance reformatted into a pandas structure (single-sample layout).
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=1)
    raw_variance = self._get_variance(reformatted_input_values)
    variance = reformat_sample_to_pandas(raw_variance, number_samples=1)
    return variance
def get_mean(self, input_values=None):
    """Compute the mean and return it as a pandas frame.

    Parameters
    ----------
    input_values : dict or pandas frame, optional
        Values to condition on; defaults to no conditioning.

    Returns
    -------
    Mean reformatted into a pandas structure (single-sample layout).
    """
    # Avoid the shared-mutable-default pitfall: never use {} as a default argument.
    if input_values is None:
        input_values = {}
    reformatted_input_values = reformat_sampler_input(
        pandas_frame2dict(input_values), number_samples=1)
    raw_mean = self._get_mean(reformatted_input_values)
    mean = reformat_sample_to_pandas(raw_mean, number_samples=1)
    return mean
# NOTE(review): this fragment begins mid-call — the opening of the statement that
# these keyword arguments belong to is outside the visible chunk; left as-is.
                                     biased=False, number_post_samples=8000000)
# Run variational inference on the model with an SGD optimizer and a
# particle-based posterior. TODO confirm: `inference`, `model`, `particles`,
# and `inference_method` are defined earlier in the file.
inference.perform_inference(model,
                            inference_method=inference_method,
                            number_iterations=1000,
                            number_samples=50,
                            optimizer="SGD",
                            lr=0.0001,
                            posterior_model=particles,
                            pretraining_iterations=0)
loss_list = model.diagnostics["loss curve"]

# Local variational models
# Plot the training loss curve.
plt.plot(loss_list)
plt.show()

# Samples
print(inference_method.weights)
# Draw M samples from each per-particle sampler model and plot a weighted
# ensemble histogram of "theta".
M = 8000
samples = [reformat_sample_to_pandas(sampler._get_sample(M, max_itr=np.inf))
           for sampler in inference_method.sampler_model]
ensemble_histogram(samples,
                   variable="theta",
                   weights=inference_method.weights,
                   bins=50)
plt.show()
#print([p.get_sample(1) for p in particles])
#print(initial_locations)
#samples = Qtheta.get_sample(50)
#print(samples)