Пример #1
0
    def setUp(self):
        """
        Build the 3-to-2 linear-map discretization, attach error estimates
        and Jacobians, and wrap it in a piecewise polynomial surrogate.
        """
        reference_param = np.array([0.5, 0.5, 0.5])
        reference_output = linear_model1(reference_param)

        my_sampler = bsam.sampler(linear_model1)
        inputs = sample.sample_set(3)
        inputs.set_domain(np.repeat([[0.0, 1.0]], 3, axis=0))
        inputs = my_sampler.random_sample_set(
            'random', inputs, num_samples=1E2)
        disc = my_sampler.compute_QoI_and_create_discretization(
            inputs, globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=reference_output, rect_scale=0.5)
        num = disc.check_nums()
        # Constant error estimate per output sample; constant Jacobian per
        # input sample (the map is linear).
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 2)))
        jacobians = np.zeros((num, 2, 3))
        jacobians[:, :, :] = np.array(
            [[0.506, 0.463], [0.253, 0.918], [0.085, 0.496]]).transpose()

        disc._input_sample_set.set_jacobians(jacobians)
        self.sur = surrogates.piecewise_polynomial_surrogate(disc)
Пример #2
0
def postprocess(station_nums, ref_num):
    """
    Solve the stochastic inverse problem for the QoI defined by the given
    stations and save the resulting probability measure to disk.

    :param station_nums: indices (into ``Q``) of the stations defining the
        QoI map; 2 or 3 entries
    :param ref_num: index of the reference solution row in ``Q_ref``
    """
    filename = 'P_q' + str(station_nums[0] + 1) + \
        '_q' + str(station_nums[1] + 1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation.
    # Save points used to partition D for the simple function approximation
    # and the approximation itself (this can be used to make close
    # comparisons).
    output_probability_set = \
        sfun.regular_partition_uniform_distribution_rectangle_scaled(
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
                                    output_probability_set)

    # Calculate P on the actual samples under the assumption that the
    # Voronoi cells have equal size.
    input_sample_set.estimate_volume_mc()
    # Fixed: was a Python 2 print statement (a syntax error on Python 3).
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Пример #3
0
    def setUp(self):
        """
        Set up a 1-D problem whose output map is ``2 * input``, with an
        emulated input sample set and a simple-function output probability.
        """
        import numpy.random as rnd
        rnd.seed(1)
        self.inputs = samp.sample_set(1)
        self.outputs = samp.sample_set(1)
        # Parameter domain is the unit interval.
        self.lam_domain = np.zeros((1, 2))
        self.lam_domain[:, 0] = 0.0
        self.lam_domain[:, 1] = 1.0
        self.inputs.set_domain(self.lam_domain)
        # NOTE: these values are replaced below; the call is kept so the
        # random-number stream stays in step with the original setup.
        self.inputs.set_values(rnd.rand(100, ))
        self.num_l_emulate = 1001
        self.inputs = bsam.random_sample_set(
            'r', self.inputs.get_domain(), num_samples=1001, globalize=True)
        self.outputs.set_values(2.0 * self.inputs._values)
        Q_ref = np.mean(self.outputs._values, axis=0)
        self.inputs_emulated = bsam.random_sample_set(
            'r', self.inputs.get_domain(),
            num_samples=self.num_l_emulate, globalize=True)
        self.output_prob = \
            simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
                self.outputs, Q_ref=Q_ref, rect_scale=0.2,
                cells_per_dimension=1)
        self.disc = samp.discretization(
            input_sample_set=self.inputs,
            output_sample_set=self.outputs,
            output_probability_set=self.output_prob,
            emulated_input_sample_set=self.inputs_emulated)
Пример #4
0
    def setUp(self):
        """
        Set up a 1-D inverse problem: outputs are ``2 * inputs``, with an
        emulated input set and a regular-partition output probability.
        """
        import numpy.random as rnd
        rnd.seed(1)
        self.inputs = samp.sample_set(1)
        self.outputs = samp.sample_set(1)
        # Unit-interval parameter domain.
        self.lam_domain = np.zeros((1, 2))
        self.lam_domain[:, 0] = 0.0
        self.lam_domain[:, 1] = 1.0
        self.inputs.set_domain(self.lam_domain)
        # Values are overwritten just below; the draw is kept so the RNG
        # stream matches the original setup exactly.
        self.inputs.set_values(rnd.rand(100,))
        self.num_l_emulate = 1001
        self.inputs = bsam.random_sample_set(
            'r', self.inputs.get_domain(),
            num_samples=1001, globalize=True)
        self.outputs.set_values(2.0 * self.inputs._values)
        Q_ref = np.mean(self.outputs._values, axis=0)
        self.inputs_emulated = bsam.random_sample_set(
            'r', self.inputs.get_domain(),
            num_samples=self.num_l_emulate, globalize=True)
        self.output_prob = \
            simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
                self.outputs, Q_ref=Q_ref, rect_scale=0.2,
                cells_per_dimension=1)
        self.disc = samp.discretization(
            input_sample_set=self.inputs,
            output_sample_set=self.outputs,
            output_probability_set=self.output_prob,
            emulated_input_sample_set=self.inputs_emulated)
Пример #5
0
def postprocess(station_nums, ref_num):
    """
    Solve the stochastic inverse problem for the QoI defined by the given
    stations and save the resulting probability measure to disk.

    :param station_nums: indices (into ``Q``) of the stations defining the
        QoI map; 2 or 3 entries
    :param ref_num: index of the reference solution row in ``Q_ref``
    """
    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation.
    # Save points used to partition D for the simple function approximation
    # and the approximation itself (this can be used to make close
    # comparisons).
    output_probability_set = \
        sfun.regular_partition_uniform_distribution_rectangle_scaled(
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
                                    output_probability_set)

    # Calculate P on the actual samples under the assumption that the
    # Voronoi cells have equal size.
    input_sample_set.estimate_volume_mc()
    # Fixed: was a Python 2 print statement (a syntax error on Python 3).
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
Пример #6
0
    def setUp(self):
        """
        Set up problem.

        Builds the expected rectangle domain from ``self.Q_ref`` and
        ``self.data_domain``, then creates the simple-function approximation
        under test.
        """
        super(regular_partition_uniform_distribution_rectangle_scaled_list,
              self).setUp()
        # Bug fix: the original ``type(self.Q_ref) != np.array`` compared a
        # type object against the np.array *function*, which is always True,
        # so ndarray inputs were needlessly wrapped.  Use isinstance with
        # np.ndarray for the intended check.
        if not isinstance(self.Q_ref, np.ndarray):
            Q_ref = np.array([self.Q_ref])
        else:
            Q_ref = self.Q_ref
        # Ensure data_domain is 2-D: one (min, max) row per dimension.
        if len(self.data_domain.shape) == 1:
            data_domain = np.expand_dims(self.data_domain, axis=0)
        else:
            data_domain = self.data_domain

        # Expected rectangle: width is 10% of the domain's upper bound,
        # centered at Q_ref.
        self.rect_domain = np.zeros((data_domain.shape[0], 2))
        binratio = 0.1 * np.ones((data_domain.shape[0],))
        r_width = binratio * data_domain[:, 1]

        self.rect_domain[:, 0] = Q_ref - .5 * r_width
        self.rect_domain[:, 1] = Q_ref + .5 * r_width

        self.data_prob = sFun.regular_partition_uniform_distribution_rectangle_scaled(
            self.data, self.Q_ref, binratio)
        self.rho_D_M = self.data_prob._probabilities
        self.d_distr_samples = self.data_prob._values
Пример #7
0
    def setUp(self):
        """
        Build a 1-to-1 linear-map discretization with error estimates and
        Jacobians attached.
        """
        reference_param = np.array([0.5])
        reference_output = linear_model3(reference_param)

        my_sampler = bsam.sampler(linear_model3)
        inputs = sample.sample_set(1)
        inputs.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
        inputs = my_sampler.random_sample_set(
            'random', inputs, num_samples=1E2)
        disc = my_sampler.compute_QoI_and_create_discretization(
            inputs, globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=reference_output, rect_scale=0.5)
        num = disc.check_nums()
        # Constant error estimate and (linear-map) Jacobian per sample.
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 1)))
        jacobians = np.zeros((num, 1, 1))
        jacobians[:, :, :] = np.array([[0.506]]).transpose()

        disc._input_sample_set.set_jacobians(jacobians)
        self.disc = disc
Пример #8
0
    def setUp(self):
        """
        Construct the 1-D linear-model discretization used by the tests,
        complete with error estimates and Jacobians.
        """
        p_ref = np.array([0.5])
        q_ref = linear_model3(p_ref)

        sampler = bsam.sampler(linear_model3)
        in_set = sample.sample_set(1)
        in_set.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
        in_set = sampler.random_sample_set('random', in_set, num_samples=1E2)
        disc = sampler.compute_QoI_and_create_discretization(
            in_set, globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=q_ref, rect_scale=0.5)
        n = disc.check_nums()
        # Uniform 1% error estimates; the Jacobian of the linear map is the
        # same for every sample.
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((n, 1)))
        jac = np.zeros((n, 1, 1))
        jac[:, :, :] = np.array([[0.506]]).transpose()

        disc._input_sample_set.set_jacobians(jac)
        self.disc = disc
Пример #9
0
    def setUp(self):
        """
        Setup map.

        Builds a 3-to-2 linear-map discretization with error estimates and
        Jacobians, then wraps it in a piecewise polynomial surrogate.
        """
        p_ref = np.array([0.5, 0.5, 0.5])
        q_ref = linear_model1(p_ref)

        sampler = bsam.sampler(linear_model1)
        in_set = sample.sample_set(3)
        in_set.set_domain(np.repeat([[0.0, 1.0]], 3, axis=0))
        in_set = sampler.random_sample_set('random', in_set, num_samples=1E2)
        disc = sampler.compute_QoI_and_create_discretization(
            in_set, globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=q_ref, rect_scale=0.5)
        n = disc.check_nums()
        # Constant error estimates; one constant Jacobian per sample since
        # the map is linear.
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((n, 2)))
        jac = np.zeros((n, 2, 3))
        jac[:, :, :] = np.array(
            [[0.506, 0.463], [0.253, 0.918], [0.085, 0.496]]).transpose()

        disc._input_sample_set.set_jacobians(jac)
        self.sur = surrogates.piecewise_polynomial_surrogate(disc)
Пример #10
0
def postprocess(station_nums, ref_num):
    """
    Solve the stochastic inverse problem for the QoI defined by the given
    stations three different ways (emulated samples, equal-volume cells,
    MC-estimated volumes) and save each solution to disk.

    :param station_nums: indices (into ``Q``) of the stations defining the
        QoI map; 2 or 3 entries
    :param ref_num: index of the reference solution row in ``Q_ref``
    """
    filename = 'P_q' + str(station_nums[0] + 1) + \
        '_q' + str(station_nums[1] + 1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create simple function approximation.
    # Save points used to partition D for the simple function approximation
    # and the approximation itself (this can be used to make close
    # comparisons).
    output_probability_set = \
        sfun.regular_partition_uniform_distribution_rectangle_scaled(
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    num_l_emulate = 1e4
    set_emulated = bsam.random_sample_set('r', lam_domain, num_l_emulate)
    my_disc = sample.discretization(
        input_sample_set, output_sample_set, output_probability_set,
        emulated_input_sample_set=set_emulated)

    print("Finished emulating lambda samples")

    # Calculate P on the emulated lambda samples.
    print("Calculating prob_on_emulated_samples")
    calcP.prob_on_emulated_samples(my_disc)
    sample.save_discretization(my_disc, filename,
                               "prob_on_emulated_samples_solution")

    # Calculate P on the actual samples under the assumption that the
    # Voronoi cells have equal size.
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")

    # Calculate P on the actual samples, estimating the Voronoi cell
    # volumes with MC integration.
    calcP.prob_with_emulated_volumes(my_disc)
    print("Calculating prob_with_emulated_volumes")
    sample.save_discretization(my_disc, filename,
                               "prob_with_emulated_volumes_solution")
Пример #11
0
    def setUp(self):
        """
        Load the 3-to-2 sample/data fixtures, build the output probability
        around a fixed reference point, and assemble the discretization
        with an emulated input set.
        """
        self.inputs = samp.sample_set(3)
        self.outputs = samp.sample_set(2)
        self.inputs.set_values(np.loadtxt(data_path + "/3to2_samples.txt.gz"))
        self.outputs.set_values(np.loadtxt(data_path + "/3to2_data.txt.gz"))
        Q_ref = np.array([0.422, 0.9385])
        self.output_prob = \
            simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
                self.outputs, Q_ref=Q_ref, rect_scale=0.2,
                cells_per_dimension=1)

        # Unit-cube input domain.
        self.inputs.set_domain(np.array([[0.0, 1.0],
                                         [0.0, 1.0],
                                         [0.0, 1.0]]))
        import numpy.random as rnd
        rnd.seed(1)
        self.inputs_emulated = bsam.random_sample_set(
            'r', self.inputs.get_domain(),
            num_samples=1001, globalize=True)
        self.disc = samp.discretization(
            input_sample_set=self.inputs,
            output_sample_set=self.outputs,
            output_probability_set=self.output_prob,
            emulated_input_sample_set=self.inputs_emulated)
Пример #12
0
# Plot the input samples and the data domain for the reference values.
plotD.scatter_2D_multi(input_samples, ref_sample=param_ref, showdim='all',
                       filename='linearMap_ParameterSamples',
                       file_extension='.eps')
plotD.show_data_domain_2D(my_discretization, Q_ref=Q_ref,
                          file_extension='.eps')

'''
Suggested changes for user:

Try different ways of discretizing the probability measure on D defined as a uniform
probability measure on a rectangle (since D is 2-dimensional) centered at Q_ref whose
size is determined by scaling the circumscribing box of D.
'''
randomDataDiscretization = False
if not randomDataDiscretization:
    # Deterministic regular partition of the rectangle around Q_ref.
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.25,
        cells_per_dimension=3)
else:
    # Monte-Carlo partition of the same rectangle.
    simpleFunP.uniform_partition_uniform_distribution_rectangle_scaled(
        data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.25,
        M=50, num_d_emulate=1E5)

# calculate probabilities
calculateP.prob(my_discretization)

########################################
# Post-process the results
########################################
'''
Suggested changes for user:
if Q_ref.size == 2:
    plotD.show_data_domain_2D(my_discretization,
                              Q_ref=Q_ref,
                              file_extension=".eps")
'''
'''
Suggested changes for user:

Try different ways of discretizing the probability measure on D defined
as a uniform probability measure on a rectangle or interval depending
on choice of QoI_num in myModel.py.
'''
randomDataDiscretization = False
if not randomDataDiscretization:
    # Deterministic regular partition of the rectangle around Q_ref.
    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.25,
        cells_per_dimension=3)
else:
    # Monte-Carlo partition of the same rectangle.
    simpleFunP.uniform_partition_uniform_distribution_rectangle_scaled(
        data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.25,
        M=50, num_d_emulate=1E5)

# calculate probabilities
calculateP.prob(my_discretization)

########################################
# Post-process the results
Пример #14
0
# create a discretization object
my_discretization = samp.discretization(input_sample_set=input_samples,
                                        output_sample_set=output_samples)

file_name_base = "KL_cor_var_inverse_rscale"
disc_name_base = "disc_prob"
# NOTE(review): range(1) runs this loop exactly once and ``scale`` is
# immediately overridden to 5 — this looks like a parameter scan reduced to
# a single case; confirm before re-enabling the sweep.
for scale in range(1):
    scale = 5
    fact = 2**scale
    # Output file names encode the rectangle scale factor.
    fname = file_name_base + "_p05d" + str(fact)
    fname2 = disc_name_base + "_p05d" + str(fact)

    simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
        data_set=my_discretization,
        Q_ref=Q_ref,
        rect_scale=0.05 / fact,
        center_pts_per_edge=1)

    # calculate the induced probability
    calculateP.prob(my_discretization)
    samp.save_discretization(my_discretization, fname)
    P = input_samples.get_probabilities()

    # Accumulators for the per-case probabilities (``var`` and ``eta`` are
    # defined elsewhere in the original script).
    case = []
    discrete_prob = np.zeros((np.size(var), np.size(eta)))
    '''
     discrete_prob[i][j][k] = prob of i^th variance, j^th correlation length
    '''
    i = 0
    j = 0
# Build a 1-D input sample set on [1.5, 4.5] and a 2-D output set, then
# invert a simple-function approximation of the output probability.
input_samples = samp.sample_set(1)
input_samples.set_domain(np.array([[1.5, 4.5]]))
input_samples.set_values(samples)
# associate volume (Monte-Carlo estimate: equal-sized Voronoi cells)
input_samples.estimate_volume_mc()

output_samples = samp.sample_set(2)
output_samples.set_values(data)

disc_name_base = "disc_prob_simpleFunc"
# create a discretization object
my_discretization = samp.discretization(input_sample_set=input_samples,
                                        output_sample_set=output_samples)
fact = 4
simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
    data_set=my_discretization, Q_ref=Q_ref, rect_scale=0.2 / fact,
    center_pts_per_edge=1)

# calculate the induced probability
calculateP.prob(my_discretization)
samp.save_discretization(my_discretization, fname)
P = input_samples.get_probabilities()

case = []
discrete_prob = np.zeros(3)
# Fixed: the following were Python 2 print statements (syntax errors on
# Python 3); converted to print() calls.
print(np.shape(P))

print(n_samples)
stride = 0
for meso in range(3):
    print stride, stride + n_samples