Example #1
 def setup(self):
     self.S = sample.sample_set()  # instantiate the class
     self.S.setup() # just get the default setup options (x ~ U[0,1]) 
     self.S.generate_samples()
     def model(params):  # dummy model that generalizes to arbitrary dimensions
         return 2*params
     self.P = sample.map_samples_and_create_problem(self.S, model)
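As a quick check of what this fixture produces, the outputs should be exactly twice the inputs (a sketch; the `P.input.samples` / `P.output.samples` attribute names follow the later examples and are assumptions here):

    import numpy as np
    # attribute names assumed from the later examples in this collection
    assert np.allclose(self.P.output.samples, 2 * self.P.input.samples)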
Example #2
    def test_set_ratio(self):
        r"""
        TODO: clean up this explanation. Someone else should be able to understand it... 
        This test uses uniform distributions. We set things up so that we expect
        (1/2)^input_dim accepted samples as a proportion. We accomplish this 
        with the linear map 2*np.eye(n) and uniform priors and observed dists with 
        supports that have measure 1 in the (n-1) dimensional Lebesgue measure, 
        giving their Radon-Nikodym Derivative (density) values of 1 as well. 
        Thus, our posterior density evaluation is 1 * 1*(1/n) = n over its support, 
        since the pushforward will have a support with measure n, 
        and once again, thus densities of (1/n) since we are dealing with probability measures.
        """
        print(
            '\n========== testing `sample.problem_set.set_ratio` ==========\n')

        def model(params):  # dummy model that generalizes to arbitrary dimensions
            return 2 * params

        num_samples = 1000

        for dim in [1, 2, 3]:
            ones = np.ones(dim)
            S = sample.sample_set()  # instantiate the class
            S.set_dim(dim)  # set the input dimension
            S.set_dist('uniform', {'loc': 0 * ones, 'scale': 1 * ones})
            S.generate_samples(num_samples)

            P = sample.map_samples_and_create_problem(S, model)
            print('num output samples', P.output.num_samples)
            P.compute_pushforward_dist()
            P.set_observed_dist('uniform', {
                'loc': 0.5 * ones,
                'scale': 1 * ones
            })
            P.set_ratio()
            print('checking size of ratio computation... shape = ',
                  P.ratio.shape)
            assert len(P.ratio.shape) == 1
            assert P.ratio.shape[0] == num_samples
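The (1/2)^dim acceptance proportion in the docstring can be sanity-checked independently of the `sample` module; a minimal sketch using only numpy (all names here are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    for dim in [1, 2, 3]:
        x = rng.uniform(0.0, 1.0, size=(100_000, dim))    # prior samples on [0,1]^dim
        d = 2.0 * x                                       # pushforward samples, U[0,2]^dim
        inside = np.all((d >= 0.5) & (d <= 1.5), axis=1)  # support of observed U[0.5,1.5]^dim
        print(dim, inside.mean(), 0.5 ** dim)             # empirical vs. expected proportion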
Example #3
    def test_accept_reject(self):
        print("\n========== testing `solve.perform_accept_reject` ==========\n")

        def model(params):  # dummy model that generalizes to arbitrary dimensions
            return 2 * params

        num_samples = 2000
        num_tests = 50

        for dim in range(1, 6):
            err = []
            S = sample.sample_set()  # instantiate the class
            S.set_dim(dim)  # set the input dimension
            for j in range(dim):
                S.set_dist("uniform", {"loc": 0, "scale": 1}, j)
            S.generate_samples(num_samples)
            P = sample.map_samples_and_create_problem(S, model)
            P.compute_pushforward_dist()
            for j in range(dim):
                P.set_observed_dist("uniform", {"loc": 0.5, "scale": 1}, j)
            P.set_ratio()

            for seed in range(num_tests):
                solve.problem(P, seed=seed)
                err.append(
                    np.abs(
                        len(P.accept_inds) - (0.5 ** P.input.dim) * P.input.num_samples
                    )
                )
            avg_missed = np.mean(err)
            print(
                "dim %d: avg |#accepted - expected| over %d trials: %.1f"
                % (dim, num_tests, avg_missed)
            )
            assert (
                avg_missed < (0.05) * P.input.num_samples
            )  # want within 5% of num_samples
Example #4
# Standalone walkthrough. Assumes `import numpy as np` and this package's
# `sample` and `solve` modules are imported, with `dim`, `num_samples`,
# `num_tests`, and `model` defined as in the examples above, e.g.:
import numpy as np
dim = 2             # example value
num_samples = 1000  # example value
num_tests = 50      # example value

def model(params):  # dummy model that generalizes to arbitrary dimensions
    return 2 * params

err = []
ones = np.ones(dim)
# S = sample.sample_set()  # instantiate the object. Can also pass `size=(num_samples,dim)`
# S.set_dim(dim)  # set dimension
# S.set_num_samples(num_samples)  # set number of samples.
# # # This is where the setup actually occurs # # #
S = sample.sample_set(size=(num_samples, dim))  # ...alternatively, the three lines above
S.set_dist("uniform", {
    "loc": 0 * ones,
    "scale": 1 * ones
})  # uniform priors in all directions. 'normal' also available.
S.generate_samples()  # generate samples, store them in the sample_set object.
P = sample.map_samples_and_create_problem(
    S, model
)  # map the samples, create new `sample_set` for outputs, put them together into a `problem_set` object.
P.compute_pushforward_dist()  # gaussian_kde by default on the data space.
P.set_observed_dist("uniform", {
    "loc": 0.5 * ones,
    "scale": 1 * ones
})  # define your observed distribution.
P.set_ratio()  # compute ratio (evaluate your observed and pushforward densities)
# solve the problem several times to get an expectation.
for seed in range(num_tests):
    solve.problem(P, seed=seed)  # default: perform accept/reject (`method='AR'`)
    err.append(
        np.abs(len(P.accept_inds) - (0.5 ** P.input.dim) * P.input.num_samples))
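Mirroring the assertion in Example #3, the per-seed errors can then be summarized (a sketch):

    avg_missed = np.mean(err)  # average |#accepted - expected| over the trials
    print("avg miss over %d trials: %.1f samples" % (num_tests, avg_missed))
    assert avg_missed < 0.05 * P.input.num_samples  # want within 5% of num_samples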
Example #5
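This example relies on a helper `SSE_generator` that is not shown here. Judging from its use below (and the chi2 observed distribution with df=num_observations), it presumably builds a sum-of-squared-residuals QoI map; a minimal sketch under that assumption:

    import numpy as np

    def SSE_generator(model, observed_data, sigma):
        """Return a QoI function of `lam` alone: the sum of squared
        standardized residuals (assumed behavior, not the package's code)."""
        def qoi(lam):
            residuals = (model(lam) - observed_data) / sigma
            return np.sum(residuals ** 2, axis=-1)
        return qoi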
def sandbox(num_samples=int(1E4), lam_bound=[3, 6], lam_0=3.5,
            t_0=0.1, Delta_t=0.1, num_observations=4, sd=1,
            fixed_noise=True, compare=False, smooth_post=False,
            fun_choice=0, num_trials=1):
    # NOTE this version only uses constant variances for the sake
    # of interactivity.
    # TODO overload sd variable to take in lists/arrays
    np.random.seed(0) # want deterministic results
    sigma = sd*np.ones(num_observations)
    
    t = np.linspace(t_0, t_0 + Delta_t*(num_observations-1), num_observations)
    if fun_choice == 0:
        def model(lam):
            return lam*np.exp(-t)
    elif fun_choice == 1:  # fixed frequency (requires |lam| <= 1 for arccos)
        def model(lam):
            lam_1 = 1.0
            return np.cos(lam_1*t + np.arccos(lam))
    elif fun_choice == 2:  # fixed initial condition (requires lam_2 in [-1, 1])
        def model(lam):
            lam_2 = 1
            return np.cos(lam*t + np.arccos(lam_2))
    else:
        return None

    # Global options - Consistent over all the trials
    plt.rcParams['figure.figsize'] = (18, 6)
    plt.close('all')
    fig, (ax1, ax2, ax3) = plt.subplots(1,3)
    
    trial_seeds = list(range(num_trials))  # seed each trial in the same numerical order
    num_accept_list = []
    # in the case that we fix our noise-model:
    observed_data = model(lam_0) + np.random.randn(int(num_observations))*sigma
    # Instantiate the sample set object.
    S = samp.sample_set(size=(num_samples,1))
    a, b = lam_bound
    S.set_dist('uniform',{'loc':a, 'scale':b-a}) # same distribution object for all
    
    for seed in trial_seeds:
        if not fixed_noise: # if we change the noise model from run-to-run, recompute observed_data
            np.random.seed(seed)
            observed_data = model(lam_0) + np.random.randn(int(num_observations))*sigma
        
        # np.random.seed(seed)
        # Sample the Parameter Space
        S.generate_samples(seed=seed)
        lam = S.samples
        QoI_fun = SSE_generator(model, observed_data, sigma) # generates a function that just takes `lam` as input
        P = samp.map_samples_and_create_problem(S, QoI_fun)
        # Map to Data Space
        D = P.output.samples.transpose()
        
        P.compute_pushforward_dist() # gaussian_kde by default on the data space.
        pf_dens = P.pushforward_dist
        
        P.set_observed_dist('chi2', {'df':num_observations}, dim=0) # define your observed distribution.
        P.set_ratio() # compute ratio (evaluate your observed and pushforward densities)
        
        
        
        # Observed measure: chi2 with df = num_observations (set above)
        obs_dist = P.observed_dist

        # Solve the problem: r is the vector of density ratios evaluated at the outputs
        
        r = P.ratio
        M = np.max(r)
        eta_r = r/M
        
        if compare or smooth_post:
            if seed == 0:
                logging.info("""Performing Accept/Reject 
                to estimate the pushforward of the posterior.""")
            solve.problem(P, seed=seed)
            accept_inds = P.accept_inds
            num_accept = len(accept_inds)
            num_accept_list.append(num_accept)
            if num_accept < 10:
                logging.warning(("Fewer than ten samples were accepted for "
                    "`trial_seed` = %d. Please increase the total number of "
                    "samples or the standard deviation.") % seed)
                smooth_flag = False
            else:
                smooth_flag = True
        
        res = 50
        max_x = D.max()
        # Plot pushforward density on the first axes
        x1 = np.linspace(-0.25, max_x, res)
        ax1.plot(x1, pf_dens.pdf(x1))
        ax1.set_title('Pushforward Q(Prior)')
        ax1.set_xlabel('Q(lambda)')
        
        x2 = np.linspace(0, max_x, res)
        ax2.plot(x2, obs_dist.pdf(x2))
        if compare:
            push_post_dens_kde = gauss_kde(D[:, accept_inds])
            pf = push_post_dens_kde.pdf(x2)
            ax2.plot(x2, pf, alpha=0.2)
            # ax2.legend(['Observed', 'Recovered'])
        ax2.set_title('Observed Density')
        ax2.set_xlabel('Q(lambda)')
        

        x3 = np.linspace(a, b, res)
        if smooth_post:
            if smooth_flag:
                post_dens_kde = gauss_kde(lam[accept_inds, :].transpose())
                ps = post_dens_kde.pdf(x3)
                ax3.plot(x3, ps)
        else:
            ax3.scatter(lam, eta_r)
        ax3.scatter(lam_0, 0.05)  # mark the true parameter value
        ax3.set_title('Posterior Distribution')
        ax3.set_xlabel('Lambda')
        # # OPTIONAL: zoom the view-window around the true parameter, e.g.
        # pr = 0.2  # percentage view-window around true parameter.
        # ax3.set_xlim(lam_0 * np.array([1 - pr, 1 + pr]))
        ax3.set_xlim([a, b])
    
    plt.show()
    
    if compare or smooth_post:
        print('Mean Acceptance Rate: %2.2f%%' % (100 * np.mean(num_accept_list) / num_samples))
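A hypothetical invocation (argument values are illustrative, not from the source):

    sandbox(num_samples=int(1E4), lam_bound=[3, 6], lam_0=3.5,
            num_observations=4, sd=1, compare=True, num_trials=5)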