Example #1
 def setUp(self):
     """
     Set up problem.
     """
     import numpy.random as rnd
     rnd.seed(1)
     self.lam_domain=np.zeros((10,2))
     self.lam_domain[:,0]=0.0
     self.lam_domain[:,1]=1.0
     self.num_l_emulate = 1001
     self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain, self.num_l_emulate)
     self.samples =  calcP.emulate_iid_lebesgue(self.lam_domain, 100)
     self.data = np.dot(self.samples,rnd.rand(10,4))
     Q_ref =  np.mean(self.data, axis=0)
     (self.d_distr_prob, self.d_distr_samples,
      self.d_Tree) = simpleFunP.uniform_hyperrectangle(
          data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
Example #2
    def setUp(self):
        """
        Test dimension, number of samples, and that all the samples are within
        lambda_domain.

        """
        lam_left = np.array([0.0, .25, .4])
        lam_right = np.array([1.0, 4.0, .5])
        lam_width = lam_right-lam_left

        self.lam_domain = np.zeros((3, 3))
        self.lam_domain[:, 0] = lam_left
        self.lam_domain[:, 1] = lam_right

        num_samples_dim = 2
        start = lam_left+lam_width/(2*num_samples_dim)
        stop = lam_right-lam_width/(2*num_samples_dim)
        d1_arrays = []
        
        for l, r in zip(start, stop):
            d1_arrays.append(np.linspace(l, r, num_samples_dim))

        self.num_l_emulate = 1000001

        self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain,
                self.num_l_emulate)
        self.samples = util.meshgrid_ndim(d1_arrays)
        self.volume_exact = 1.0/self.samples.shape[0]
        self.lam_vol, self.lam_vol_local, self.local_index = calcP.\
                estimate_volume(self.samples, self.lambda_emulate)
Example #3
    def runTest(self):
        """
        Test dimension, number of samples, and that all the samples are within
        lambda_domain.

        """
        lam_left = np.array([0.0, .25, .4])
        lam_right = np.array([1.0, 4.0, .5])

        lam_domain = np.zeros((3,3))
        lam_domain[:,0] = lam_left
        lam_domain[:,1] = lam_right

        num_l_emulate = 1e6

        lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)

        # check the dimension
        np.testing.assert_array_equal(lambda_emulate.shape, (3, num_l_emulate))

        # check that the samples are all within the correct bounds
        self.assertGreaterEqual(np.min(lambda_emulate[0, :]), 0.0)
        self.assertGreaterEqual(np.min(lambda_emulate[1, :]), .25)
        self.assertGreaterEqual(np.min(lambda_emulate[2, :]), .4)
        self.assertLessEqual(np.max(lambda_emulate[0, :]), 1.0)
        self.assertLessEqual(np.max(lambda_emulate[1, :]), 4.0)
        self.assertLessEqual(np.max(lambda_emulate[2, :]), .5)
Example #4
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create Simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    
    if comm.rank == 0:
        print "Finished emulating lambda samples"
        mdict = dict()
        mdict['rho_D_M'] = rho_D_M
        mdict['d_distr_samples'] = d_distr_samples 
        mdict['num_l_emulate'] = num_l_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_emulated"
        mdict['P0'] = P0
        mdict['lem0'] = lem0
        mdict['io_ptr0'] = io_ptr0
        mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    if comm.rank == 0:
        print "Calculating prob"
        mdict['P1'] = P1
        mdict['lam_vol1'] = lam_vol1
        mdict['lem1'] = samples
        mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    if comm.rank == 0:
        print "Calculating prob_mc"
        mdict['P3'] = P3
        mdict['lam_vol3'] = lam_vol3
        mdict['io_ptr3'] = io_ptr3
        mdict['emulate_ptr3'] = emulate_ptr3
        # Export P
        sio.savemat(filename, mdict, do_compression=True)
Example #5
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create Simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    print "Finished emulating lambda samples"

    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples 
    mdict['num_l_emulate'] = num_l_emulate
    mdict['lambda_emulate'] = lambda_emulate

    # Calculate P on lambda emulate
    (P0, lem0, io_ptr0, emulate_ptr0) = calcP.prob_emulated(samples, data,
            rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_emulated"
    mdict['P0'] = P0
    mdict['lem0'] = lem0
    mdict['io_ptr0'] = io_ptr0
    mdict['emulate_ptr0'] = emulate_ptr0

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    (P1, lam_vol1, io_ptr1) = calcP.prob(samples, data,
            rho_D_M, d_distr_samples, d_Tree)
    print "Calculating prob"
    mdict['P1'] = P1
    mdict['lam_vol1'] = lam_vol1
    mdict['lem1'] = samples
    mdict['io_ptr1'] = io_ptr1

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lambda_emulate, d_Tree)
    print "Calculating prob_mc"
    mdict['P3'] = P3
    mdict['lam_vol3'] = lam_vol3
    mdict['io_ptr3'] = io_ptr3
    mdict['emulate_ptr3'] = emulate_ptr3
    # Export P
    sio.savemat(filename, mdict, do_compression=True)
Example #6
 def setUp(self):
     """
     Set up problem.
     """
     import numpy.random as rnd
     rnd.seed(1)
     self.lam_domain = np.zeros((10, 2))
     self.lam_domain[:, 0] = 0.0
     self.lam_domain[:, 1] = 1.0
     self.num_l_emulate = 1001
     self.lambda_emulate = calcP.emulate_iid_lebesgue(
         self.lam_domain, self.num_l_emulate)
     self.samples = calcP.emulate_iid_lebesgue(self.lam_domain, 100)
     self.data = np.dot(self.samples, rnd.rand(10, 4))
     Q_ref = np.mean(self.data, axis=0)
     (self.d_distr_prob, self.d_distr_samples,
      self.d_Tree) = simpleFunP.uniform_hyperrectangle(
          data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
Example #7
 def setUp(self):
     self.samples = np.loadtxt(data_path + "/3to2_samples.txt.gz")
     self.data = np.loadtxt(data_path + "/3to2_data.txt.gz")
     Q_ref = np.array([0.422, 0.9385])
     (self.d_distr_prob, self.d_distr_samples,
      self.d_Tree) = simpleFunP.uniform_hyperrectangle(
          data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
     self.lam_domain= np.array([[0.0, 1.0],
                                [0.0, 1.0],
                                [0.0, 1.0]])
     import numpy.random as rnd
     rnd.seed(1)
     self.lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain=self.lam_domain, 
                                                      num_l_emulate = 1001)
Example #8
 def setUp(self):
     self.samples = np.loadtxt(data_path + "/3to2_samples.txt.gz")
     self.data = np.loadtxt(data_path + "/3to2_data.txt.gz")
     Q_ref = np.array([0.422, 0.9385])
     (self.d_distr_prob, self.d_distr_samples,
      self.d_Tree) = simpleFunP.uniform_hyperrectangle(
          data=self.data, Q_ref=Q_ref, bin_ratio=0.2, center_pts_per_edge=1)
     self.lam_domain = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
     import numpy.random as rnd
     rnd.seed(1)
     self.lambda_emulate = calcP.emulate_iid_lebesgue(
         lam_domain=self.lam_domain, num_l_emulate=1001)
Example #9
    def setUp(self):
        """
        Test dimension, number of samples, and that all the samples are within
        lambda_domain.

        """
        lam_left = np.array([0.0, .25, .4])
        lam_right = np.array([1.0, 4.0, .5])

        self.lam_domain = np.zeros((3,3))
        self.lam_domain[:,0] = lam_left
        self.lam_domain[:,1] = lam_right

        self.num_l_emulate = 1000001

        self.lambda_emulate = calcP.emulate_iid_lebesgue(self.lam_domain, self.num_l_emulate)
Example #10
    def setUp(self):
        """
        Test dimension, number of samples, and that all the samples are within
        lambda_domain.

        """
        lam_left = np.array([0.0, .25, .4])
        lam_right = np.array([1.0, 4.0, .5])

        self.lam_domain = np.zeros((3, 3))
        self.lam_domain[:, 0] = lam_left
        self.lam_domain[:, 1] = lam_right

        self.num_l_emulate = 1000001

        self.lambda_emulate = calcP.emulate_iid_lebesgue(
            self.lam_domain, self.num_l_emulate)
Example #11
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_truth_'+str(ref_num+1)

    data = Q[:, station_nums]
    q_ref = Q_ref[ref_num, station_nums]

    # Create Simple function approximation
    # Save points used to partition D for simple function approximation and the
    # approximation itself (this can be used to make close comparisons...)
    (rho_D_M, d_distr_samples, d_Tree) = sfun.uniform_hyperrectangle(data,
            q_ref, bin_ratio=0.15,
            center_pts_per_edge=np.ones((data.shape[1],)))

    num_l_emulate = 1e6
    lambda_emulate = calcP.emulate_iid_lebesgue(lam_domain, num_l_emulate)
    print "Finished emulating lambda samples"

    # Calculate P on the actual samples estimating voronoi cell volume with MC
    # integration
    (P3, lam_vol3, lambda_emulate3, io_ptr3, emulate_ptr3) = calcP.prob_mc(samples,
            data, rho_D_M, d_distr_samples, lam_domain, lambda_emulate, d_Tree)
    print "Calculating prob_mc"
    mdict = dict()
    mdict['rho_D_M'] = rho_D_M
    mdict['d_distr_samples'] = d_distr_samples 
    mdict['lambda_emulate'] = util.get_global_values(lambda_emulate)   
    mdict['num_l_emulate'] = mdict['lambda_emulate'].shape[1]
    mdict['P3'] = util.get_global_values(P3)
    mdict['lam_vol3'] = util.get_global_values(lam_vol3)
    mdict['io_ptr3'] = util.get_global_values(io_ptr3)
    mdict['emulate_ptr3'] = emulate_ptr3
        
    if rank == 0:
        # Export P and compare to MATLAB solution visually
        sio.savemat(filename, mdict, do_compression=True)
that are approximately 10x the number of samples.

Note that you can always use

  lambda_emulate = samples

and this simply means that a standard Monte Carlo assumption is being
used, which in a measure-theoretic context implies that each Voronoi
cell is assumed to have the same measure. This type of approximation is
more reasonable for large n_samples due to the slow convergence rate of
Monte Carlo (it converges like 1/sqrt(n_samples)).
'''
if random_sample is False:
    lambda_emulate = samples
else:
    lambda_emulate = calculateP.emulate_iid_lebesgue(
        lam_domain=lam_domain, num_l_emulate=1E5)
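
# A minimal sketch of the "10x the number of samples" suggestion above,
# assuming `samples` stores one sample per row (shape (n_samples, ndim));
# uncomment to scale the emulated set with the sample set instead of the
# fixed 1E5 used above:
# lambda_emulate = calculateP.emulate_iid_lebesgue(
#     lam_domain=lam_domain, num_l_emulate=10*samples.shape[0])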


# calculate probabilities
(P, lambda_emulate, io_ptr, emulate_ptr) = \
    calculateP.prob_emulated(samples=samples,
                             data=data,
                             rho_D_M=d_distr_prob,
                             d_distr_samples=d_distr_samples,
                             lambda_emulate=lambda_emulate,
                             d_Tree=d_Tree)

# calculate 2d marginal probs
'''
Suggested changes for user: