Example #1
    def setUp(self):
        # Define the parameter space (Lambda)
        self.input_dim = 15
        self.input_set = sample.sample_set(self.input_dim)
        self.input_set_centers = sample.sample_set(self.input_dim)

        self.lam_domain = np.zeros((self.input_dim, 2))
        np.random.seed(0)
        self.lam_domain[:,0] = -1*np.random.random(self.input_dim) - 2
        self.lam_domain[:,1] = -1*np.random.random(self.input_dim)

        self.input_set.set_domain(self.lam_domain)
        self.input_set_centers.set_domain(self.lam_domain)

        # Choose random centers to cluster points around
        self.num_centers = 143
        self.centers = (self.lam_domain[:,1] - self.lam_domain[:,0]) * \
            np.random.random((self.num_centers,self.input_dim)) + \
            self.lam_domain[:,0]
        self.input_set_centers.set_values(self.centers)
        self.num_close = self.input_dim + 1
        self.rvec = 0.1

        # Choose array shapes for RBF methods
        np.random.seed(0)
        self.radii_rbf = np.random.random([self.num_close, self.num_close])
        self.radii_rbfdxi = np.random.random([self.input_dim, self.num_close])
        self.dxi = np.random.random([self.input_dim, self.num_close])

        # Define example linear functions (QoIs) for gradient approximation
        # methods
        self.output_dim = 37
        coeffs = np.random.random((self.input_dim,
            self.output_dim-self.input_dim))
        self.coeffs = np.append(coeffs, np.eye(self.input_dim), axis=1)
Example #2
        def setUp(self):
            self.input_dim = 2
            self.input_set = sample.sample_set(self.input_dim)
            self.input_set_centers = sample.sample_set(self.input_dim)
            self.output_dim_return = 2
            self.num_optsets_return = 5
            self.radius = 0.01
            np.random.seed(0)
            self.num_centers = 10
            self.centers = np.random.random((self.num_centers, self.input_dim))
            self.input_set_centers.set_values(self.centers)
            self.input_set = grad.sample_l1_ball(self.input_set_centers,
                    self.input_dim + 1, self.radius)
            
            self.output_dim = 28
            self.output_set = sample.sample_set(self.output_dim)
            coeffs = np.zeros((self.input_dim, 2*self.input_dim))
            coeffs = np.append(coeffs, np.random.random((self.input_dim,
                self.output_dim - 3 * self.input_dim)), axis=1)
            self.coeffs = np.append(coeffs, np.eye(self.input_dim), axis=1)

            self.output_set.set_values(self.input_set._values.dot(self.coeffs))
            self.my_disc = sample.discretization(self.input_set,
                    self.output_set)
            self.center_disc = grad.calculate_gradients_rbf(\
                self.my_disc, self.num_centers)
            self.input_set_centers = self.center_disc.get_input_sample_set()

            self.inner_prod_tol = 0.9
            self.measskew_tol = np.inf
Example #3
    def setUp(self):
        # Define the input domain (Lambda)
        self.input_dim = 1
        self.input_set = sample.sample_set(self.input_dim)
        self.input_set_centers = sample.sample_set(self.input_dim)

        self.lam_domain = np.zeros((self.input_set.get_dim(), 2))
        self.lam_domain[:,0] = np.zeros(self.input_set.get_dim())
        self.lam_domain[:,1] = np.ones(self.input_set.get_dim())

        self.input_set.set_domain(self.lam_domain)
        self.input_set_centers.set_domain(self.lam_domain)

        # Choose random centers in input_domain to cluster points around
        self.num_centers = 1
        self.num_close = self.input_set.get_dim() + 1
        self.rvec = 0.1
        np.random.seed(0)
        self.centers = np.random.uniform(self.lam_domain[:, 0],
            self.lam_domain[:, 1] - self.lam_domain[:, 0], [self.num_centers,
            self.input_set.get_dim()])
        self.input_set_centers.set_values(self.centers)

        # Choose array shapes for RBF methods
        np.random.seed(0)
        self.radii_rbf = np.random.random([self.num_close, self.num_close])
        self.radii_rbfdxi = np.random.random([self.input_dim, self.num_close])
        self.dxi = np.random.random([self.input_dim, self.num_close])

        # Define example linear functions (QoIs) for gradient approximation
        # methods
        self.output_dim = 20
        coeffs = np.random.random((self.input_dim,
            self.output_dim-self.input_dim))
        self.coeffs = np.append(coeffs, np.eye(self.input_dim), axis=1)
Example #4
 def check_show_data(self, data, sample_nos, q_ref, save, qnums, showdim):
     """
     Check to see that the :meth:`bet.postTools.plotDomains.scatter_rhoD` ran
     without generating an error.
     """
     try:
         if data.shape[1] == 4:
             data_obj_temp = sample.sample_set(4)
             data_obj_temp.set_values(data)
             plotDomains.scatter_rhoD(data_obj_temp, q_ref, sample_nos,
                                      'output', self.rho_D, qnums, None, showdim, save,
                                      False)
         else:
             data_obj_temp = sample.sample_set(data.shape[1])
             data_obj_temp.set_values(data)
             plotDomains.scatter_rhoD(data_obj_temp, q_ref, sample_nos,
                                      None, None, qnums, None, showdim, save, False)
         go = True
     except (RuntimeError, TypeError, NameError):
         print("ERROR")
         print("data shape:", data.shape)
         print("data ref:", q_ref)
         print("samples nums:", sample_nos)
         print("save:", save)
         print("qnums:", qnums)
         print("showdim:", showdim)
         go = False
     nptest.assert_equal(go, True)
Example #5
    def setUp(self):
        """
        Set up problem.
        """

        import numpy.random as rnd
        rnd.seed(1)
        self.inputs = samp.sample_set(1)
        self.outputs = samp.sample_set(1)
        self.lam_domain = np.zeros((1, 2))
        self.lam_domain[:, 0] = 0.0
        self.lam_domain[:, 1] = 1.0
        self.inputs.set_domain(self.lam_domain)
        self.inputs.set_values(rnd.rand(100,))
        self.num_l_emulate = 1001
        self.inputs = bsam.random_sample_set('r',
                                             self.inputs.get_domain(), num_samples=1001, globalize=True)
        self.outputs.set_values(2.0*self.inputs._values)
        Q_ref = np.mean(self.outputs._values, axis=0)
        self.inputs_emulated = bsam.random_sample_set('r',
                                                      self.inputs.get_domain(), num_samples=self.num_l_emulate,
                                                      globalize=True)
        self.output_prob = simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            self.outputs, Q_ref=Q_ref, rect_scale=0.2, cells_per_dimension=1)
        self.disc = samp.discretization(input_sample_set=self.inputs,
                                        output_sample_set=self.outputs,
                                        output_probability_set=self.output_prob,
                                        emulated_input_sample_set=self.inputs_emulated)
Example #6
    def test_calculate_gradients_cfd(self):
        """
        Test :meth:`bet.sensitivity.gradients.calculate_gradients_cfd`.
        """
        self.output_set = sample.sample_set(self.output_dim)
        self.cluster_set = grad.pick_cfd_points(self.input_set_centers,
            self.rvec)
        num_centers = self.input_set_centers.check_num()
        self.output_set.set_values(self.cluster_set._values.dot(self.coeffs))
        self.cluster_disc = sample.discretization(self.cluster_set,
                self.output_set)

        self.center_disc = grad.calculate_gradients_cfd(self.cluster_disc)
        self.jacobians = self.center_disc._input_sample_set._jacobians

        # Test the method returns the correct size tensor
        self.assertEqual(self.jacobians.shape, 
                (self.num_centers, self.output_dim, self.input_dim))

        # Test that each vector is normalized or a zero vector
        normG = np.linalg.norm(self.jacobians,
                ord=1, axis=2)

        # If it's a zero vector, replace it with a constant vector of unit L1 norm
        self.jacobians[normG==0] = 1.0/self.input_dim
        nptest.assert_array_almost_equal(np.linalg.norm(self.jacobians, ord=1,
            axis=2), np.ones((self.jacobians.shape[0],
                self.jacobians.shape[1])))
Example #7
    def check_show_param(self, samples, sample_nos, p_ref, save, lnums,
                         showdim):
        """
        Check to see that the :meth:`bet.postTools.plotDomains.scatter_rhoD` ran
        without generating an error.
        """
        try:
            input_sample_set_temp = sample.sample_set(samples.shape[1])
            input_sample_set_temp.set_values(samples)
            disc_obj_temp = sample.discretization(input_sample_set_temp,
                                                  self.disc._output_sample_set)
            plotDomains.scatter_rhoD(disc_obj_temp, p_ref, sample_nos, 'input',
                                     self.rho_D, lnums, None, showdim, save, False)
            go = True
        except (RuntimeError, TypeError, NameError) as error:
            print("ERROR:", error)
            print("samples shape:", samples.shape)
            print("param ref:", p_ref)
            print("samples nums:", sample_nos)
            print("save:", save)
            print("lnums:", lnums)
            print("showdim:", showdim)
            go = False

        nptest.assert_equal(go, True)
Example #8
    def check_show_data_domain_2D(self, ref_markers, ref_colors, triangles,
                                  save, filenames):
        """
        Check to see that the
        :meth:`bet.postTools.plotDomains.show_data_domain_2D` ran
        without generating an error.
        """
        Q_ref = self.disc._output_sample_set.get_values()[:, [0, 1]]
        Q_ref = Q_ref[[1, 4], :]

        data_obj_temp = sample.sample_set(2)
        data_obj_temp.set_values(
            self.disc._output_sample_set.get_values()[:, [0, 1]])
        disc_obj_temp = sample.discretization(
            self.disc._input_sample_set, data_obj_temp)

        try:
            plotDomains.show_data_domain_2D(
                disc_obj_temp, Q_ref,
                ref_markers, ref_colors, triangles=triangles, save=save,
                filenames=filenames)
            go = True
        except (RuntimeError, TypeError, NameError):
            go = False

        nptest.assert_equal(go, True)
Example #9
File: Q_3D.py Project: leiyangcq/BET
def postprocess(station_nums, ref_num):
    
    filename = 'P_q'+str(station_nums[0]+1)+'_q'+str(station_nums[1]+1)
    if len(station_nums) == 3:
        filename += '_q'+str(station_nums[2]+1)
    filename += '_ref_'+str(ref_num+1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
    output_sample_set.set_values(data)
    q_ref = Q_ref[ref_num, station_nums]

    # Create Simple function approximation
    # Save points used to partition D for the simple function approximation and
    # the approximation itself (this can be used to make close comparisons...)
    output_probability_set = sfun.regular_partition_uniform_distribution_rectangle_scaled(\
            output_sample_set, q_ref, rect_scale=0.15,
            cells_per_dimension=np.ones((data.shape[1],)))

    my_disc = sample.discretization(input_sample_set, output_sample_set,
            output_probability_set)

    # Calculate P on the actual samples with the assumption that Voronoi cells
    # have equal size
    input_sample_set.estimate_volume_mc()
    print("Calculating prob")
    calcP.prob(my_disc)
    sample.save_discretization(my_disc, filename, "prob_solution")
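
A hedged usage sketch for the function above; it relies on the module-level Q, Q_ref, and input_sample_set loaded elsewhere in Q_3D.py, and the station indices and reference number below are purely illustrative:

# Hypothetical call: invert stations q1 and q2 against reference run 7
# (both station_nums and ref_num are zero-based here).
postprocess([0, 1], 6)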
Example #10
def test_loadmat():
    """
    Tests :meth:`bet.sampling.basicSampling.loadmat`
    """
    np.random.seed(1)
    mdat1 = {'num_samples':5}
    mdat2 = {'num_samples':6}
    model = "this is not a model"

    my_input1 = sample_set(1)
    my_input1.set_values(np.random.random((5,1)))
    my_output = sample_set(1)
    my_output.set_values(np.random.random((5,1)))
    my_input2 = sample_set(1)
    my_input2.set_values(np.random.random((6,1)))


    sio.savemat(os.path.join(local_path, 'testfile1'), mdat1)
    sio.savemat(os.path.join(local_path, 'testfile2'), mdat2)

    
    bet.sample.save_discretization(disc(my_input1, my_output),
            os.path.join(local_path, 'testfile1'), globalize=True)
    bet.sample.save_discretization(disc(my_input2, None),
            os.path.join(local_path, 'testfile2'), "NAME", globalize=True)

    (loaded_sampler1, discretization1) = bsam.loadmat(os.path.join(local_path,
        'testfile1'))
    nptest.assert_array_equal(discretization1._input_sample_set.get_values(),
            my_input1.get_values())
    nptest.assert_array_equal(discretization1._output_sample_set.get_values(),
            my_output.get_values())
    assert loaded_sampler1.num_samples == 5
    assert loaded_sampler1.lb_model is None

    (loaded_sampler2, discretization2) = bsam.loadmat(os.path.join(local_path,
        'testfile2'), disc_name="NAME", model=model)
    nptest.assert_array_equal(discretization2._input_sample_set.get_values(),
            my_input2.get_values())
    assert discretization2._output_sample_set is None
    assert loaded_sampler2.num_samples == 6
    assert loaded_sampler2.lb_model == model
    if os.path.exists(os.path.join(local_path, 'testfile1.mat')):
        os.remove(os.path.join(local_path, 'testfile1.mat'))
    if os.path.exists(os.path.join(local_path, 'testfile2.mat')):
        os.remove(os.path.join(local_path, 'testfile2.mat'))
Example #11
    def setUp(self):
        # Define the parameter space (Lambda)
        self.input_dim = 2
        self.input_set_rbf = sample.sample_set(self.input_dim)
        self.input_set_ffd = sample.sample_set(self.input_dim)
        self.input_set_cfd = sample.sample_set(self.input_dim)

        self.input_set_centers = sample.sample_set(self.input_dim)

        self.output_dim = 2
        self.output_set_rbf = sample.sample_set(self.output_dim)
        self.output_set_ffd = sample.sample_set(self.output_dim)
        self.output_set_cfd = sample.sample_set(self.output_dim)

        self.lam_domain = np.zeros((self.input_dim, 2))
        self.lam_domain[:, 0] = np.zeros(self.input_dim)
        self.lam_domain[:, 1] = np.ones(self.input_dim)

        self.input_set_rbf.set_domain(self.lam_domain)
        self.input_set_ffd.set_domain(self.lam_domain)
        self.input_set_cfd.set_domain(self.lam_domain)

        # Choose random centers to cluster points around
        self.num_centers = 100
        np.random.seed(0)
        self.centers = (self.lam_domain[:, 1] - self.lam_domain[:, 0]) * \
            np.random.random((self.num_centers, self.input_dim)) + \
            self.lam_domain[:, 0]
        self.input_set_centers.set_values(self.centers)
        self.num_close = self.input_dim + 1
        self.rvec = 0.01 * np.ones(self.input_dim)

        self.input_set_rbf = grad.sample_l1_ball(self.input_set_centers,
                                                 self.num_close, self.rvec)
        self.input_set_ffd = grad.pick_ffd_points(self.input_set_centers,
                                                  self.rvec)
        self.input_set_cfd = grad.pick_cfd_points(self.input_set_centers,
                                                  self.rvec)

        # Define a vector valued function f : [0,1]x[0,1] -> [x^2, y^2]
        def f(x):
            f = np.zeros(x.shape)
            f[:, 0] = x[:, 0]**2
            f[:, 1] = x[:, 1]**2
            return f

        self.output_set_rbf.set_values(f(self.input_set_rbf.get_values()))
        self.output_set_ffd.set_values(f(self.input_set_ffd.get_values()))
        self.output_set_cfd.set_values(f(self.input_set_cfd.get_values()))
        self.cluster_disc_rbf = sample.discretization(
            self.input_set_rbf, self.output_set_rbf)
        self.cluster_disc_ffd = sample.discretization(
            self.input_set_ffd, self.output_set_ffd)
        self.cluster_disc_cfd = sample.discretization(
            self.input_set_cfd, self.output_set_cfd)

        self.G_exact = np.zeros([self.num_centers, self.output_dim,
                                 self.input_dim])
        self.G_exact[:, 0, 0] = 2 * self.centers[:, 0]
        self.G_exact[:, 1, 1] = 2 * self.centers[:, 1]
Example #12
 def createData(self):
     """
     Set up data.
     """
     self.data = samp.sample_set(1)
     self.data.set_values(np.random.random((100,1))*10.0)
     self.Q_ref = np.array([5.0])
     self.data_domain = np.expand_dims(np.array([0.0, 10.0]), axis=0)
     self.mdim = 1
Example #13
 def createData(self):
     """
     Set up data.
     """
     self.data = samp.sample_set(2)
     self.data.set_values(np.random.random((100,2))*10.0)
     self.Q_ref = np.array([5.0, 5.0])
     self.data_domain = np.array([[0.0, 10.0], [0.0, 10.0]])
     self.mdim = 2
Example #14
def verify_compute_QoI_and_create_discretization(model, sampler,
                                                 input_sample_set,
                                                 savefile):
    """
    Verify that the user samples are correct.
    """
    # evaluate the model at the samples directly
    output_values = (model(input_sample_set._values))
    if len(output_values.shape) == 1:
        output_sample_set = sample_set(1)
    else:
        output_sample_set = sample_set(output_values.shape[1])
    output_sample_set.set_values(output_values)
    discretization = disc(input_sample_set, output_sample_set)

    # evaluate the model at the samples via the sampler
    print(savefile, input_sample_set.get_dim())
    my_discretization = sampler.compute_QoI_and_create_discretization(\
        input_sample_set, savefile, globalize=True) 
    #comm.barrier()

    my_num = my_discretization.check_nums() 

    # compare the samples
    nptest.assert_array_equal(my_discretization._input_sample_set.get_values(),
            discretization._input_sample_set.get_values())
    # compare the data
    nptest.assert_array_equal(my_discretization._output_sample_set.get_values(),
            discretization._output_sample_set.get_values())

    # did num_samples get updated?
    assert my_num == sampler.num_samples
   
    # did the file get correctly saved?
    saved_disc = bet.sample.load_discretization(savefile)
    mdat = sio.loadmat(savefile)
    print("HERE HERE", mdat, my_num)
    #comm.barrier()
    # compare the samples
    nptest.assert_array_equal(my_discretization._input_sample_set.get_values(),
        saved_disc._input_sample_set.get_values())
    # compare the data
    nptest.assert_array_equal(my_discretization._output_sample_set.get_values(),
       saved_disc._output_sample_set.get_values())
Example #15
    def setUp(self):
        self.inputs = samp.sample_set(3)
        self.outputs = samp.sample_set(2)
        self.inputs.set_values(np.loadtxt(data_path + "/3to2_samples.txt.gz"))
        self.outputs.set_values(np.loadtxt(data_path + "/3to2_data.txt.gz"))
        Q_ref = np.array([0.422, 0.9385])
        self.output_prob = simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            self.outputs, Q_ref=Q_ref, rect_scale=0.2, cells_per_dimension=1)

        self.inputs.set_domain(np.array([[0.0, 1.0],
                                         [0.0, 1.0],
                                         [0.0, 1.0]]))
        import numpy.random as rnd
        rnd.seed(1)
        self.inputs_emulated = bsam.random_sample_set('r',
                                                      self.inputs.get_domain(), num_samples=1001, globalize=True)
        self.disc = samp.discretization(input_sample_set=self.inputs,
                                        output_sample_set=self.outputs,
                                        output_probability_set=self.output_prob,
                                        emulated_input_sample_set=self.inputs_emulated)
Example #16
def verify_regular_sample_set_dimension(sampler, input_dim,
                                         num_samples_per_dim):

    input_domain = np.repeat([[0, 1]], input_dim, axis=0)
    input_sample_set = sample_set(input_dim)
    input_sample_set.set_domain(input_domain)

    test_sample_set = input_sample_set
    dim = input_dim
    # recreate the samples
    if num_samples_per_dim is None:
        num_samples_per_dim = 5

    if not isinstance(num_samples_per_dim, collections.abc.Iterable):
        num_samples_per_dim = num_samples_per_dim * np.ones((dim,), dtype='int')

    sampler.num_samples = np.prod(num_samples_per_dim)

    test_domain = test_sample_set.get_domain()
    if test_domain is None:
        test_domain = np.repeat([[0, 1]], test_sample_set.get_dim(), axis=0)

    test_values = np.zeros((sampler.num_samples, test_sample_set.get_dim()))

    vec_samples_dimension = np.empty((dim), dtype=object)
    for i in np.arange(0, dim):
        bin_width = (test_domain[i, 1] - test_domain[i, 0]) / \
                    float(num_samples_per_dim[i])
        vec_samples_dimension[i] = list(np.linspace(
            test_domain[i, 0] - 0.5 * bin_width,
            test_domain[i, 1] + 0.5 * bin_width,
            num_samples_per_dim[i] + 2))[1:num_samples_per_dim[i] + 1]

    if np.equal(dim, 1):
        arrays_samples_dimension = np.array([vec_samples_dimension])
    else:
        arrays_samples_dimension = np.meshgrid(
            *[vec_samples_dimension[i] for i in np.arange(0, dim)], indexing='ij')

    if np.equal(dim, 1):
        test_values = arrays_samples_dimension.transpose()
    else:
        for i in np.arange(0, dim):
            test_values[:, i:i + 1] = np.vstack(arrays_samples_dimension[i].flat[:])

    test_sample_set.set_values(test_values)

    # create the sample set from sampler
    my_sample_set = sampler.regular_sample_set(input_dim,
                                            num_samples_per_dim=num_samples_per_dim)

    # compare the samples
    nptest.assert_array_equal(test_sample_set._values,
                              my_sample_set._values)
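
A sketch of how this helper might be driven, assuming bsam is bet.sampling.basicSampling as in the other examples; the toy model and sample counts are illustrative:

import numpy as np
import bet.sampling.basicSampling as bsam

# toy QoI map; 25 total samples on a 5x5 regular grid in 2-D
toy_sampler = bsam.sampler(lambda x: np.sum(x, axis=1), 25)
verify_regular_sample_set_dimension(toy_sampler, input_dim=2,
                                    num_samples_per_dim=5)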
Example #17
 def setUp(self):
     """
     Set Up
     """
     self.t_set = asam.transition_set(.5, .5**5, 1.0)
     self.output_set = sample_set(self.mdim)
     self.output_set.set_values(self.output)
     self.output_set.global_to_local()
     # Update _right_local, _left_local, _width_local
     self.output_set.set_domain(self.output_domain)
     self.output_set.update_bounds()
     self.output_set.update_bounds_local()
Example #18
    def test_scatter_2D_multi(self):
        """
        Test :meth:`bet.postTools.plotDomains.scatter_2D_multi`
        """
        if not os.path.exists('figs/'):
            os.mkdir('figs/')
        try:
            input_sample_set_temp = sample.sample_set(3)
            input_sample_set_temp.set_values(self.disc._input_sample_set.get_values()[:, [0,1,2]])

            plotDomains.scatter_2D_multi(input_sample_set_temp)
            go = True
        except (RuntimeError, TypeError, NameError):
            go = False

        nptest.assert_equal(go, True)
Example #19
    def check_scatter_3D(self, sample_nos, p_ref, save):
        """
        Check to see that the :meth:`bet.postTools.plotDomains.scatter_3D` ran
        without generating an error.
        """
        try:
            input_sample_set_temp = sample.sample_set(3)
            input_sample_set_temp.set_values(self.disc._input_sample_set.get_values()[:, [0, 1, 2]])
            plotDomains.scatter_3D(
                input_sample_set_temp,
                sample_nos,
                self.disc._input_sample_set.get_probabilities(),
                p_ref, save, False, 'XLABEL', 'YLABEL', 'ZLABEL', self.filename)
            go = True
        except (RuntimeError, TypeError, NameError):
            go = False

        nptest.assert_equal(go, True)
Example #20
    def setUp(self):
        """
        Set up problem.
        """
        emulated_input_samples = sample.sample_set(2)
        emulated_input_samples.set_domain(np.array([[0.0, 1.0], [0.0, 1.0]]))

        domain = emulated_input_samples.get_domain()
        emulated_input_samples.set_values_local(util.meshgrid_ndim(
            (np.linspace(domain[0][0], domain[0][1], 10),
             np.linspace(domain[1][0], domain[1][1], 10))))

        num_local = emulated_input_samples.get_values_local().shape[0]
        emulated_input_samples.set_probabilities_local(
            1.0 / float(comm.size) * (1.0 / float(num_local)) *
            np.ones((num_local,)))
        emulated_input_samples.check_num()

        self.samples = emulated_input_samples
Example #21
File: simpleFunP.py Project: UT-CHG/BET
def uniform_partition_uniform_distribution_data_samples(data_set):
    r"""
    Creates a simple function approximation of :math:`\rho_{\mathcal{D},M}`
    where :math:`\rho_{\mathcal{D},M}` is a uniform probability density over
    the entire ``data_domain``. Here the ``data_domain`` is the union of
    Voronoi cells defined by ``data``. In other words, we assign each sample the
    same probability, so ``M = len(data)`` or rather ``len(d_distr_samples) ==
    len(data)``. The purpose of this method is to approximate uniform
    distributions over irregularly shaped domains.

    :param data_set: Sample set that the probability measure is defined for.
    :type data_set: :class:`~bet.sample.discretization` 
        or :class:`~bet.sample.sample_set` or :class:`~numpy.ndarray`

    :rtype: :class:`~bet.sample.voronoi_sample_set`
    :returns: sample_set object defining simple function approximation
    """
    if isinstance(data_set, samp.sample_set_base):
        num = data_set.check_num()
        dim = data_set._dim
        values = data_set._values
        s_set = data_set.copy()
    elif isinstance(data_set, samp.discretization):
        num = data_set.check_nums()
        dim = data_set._output_sample_set._dim
        values = data_set._output_sample_set._values
        s_set = data_set._output_sample_set.copy()
    elif isinstance(data_set, np.ndarray):
        num = data_set.shape[0]
        dim = data_set.shape[1]
        values = data_set
        s_set = samp.sample_set(dim=dim)
        s_set.set_values(values)
    else:
        msg = "The first argument must be of type bet.sample.sample_set, "
        msg += "bet.sample.discretization or np.ndarray"
        raise wrong_argument_type(msg)

    s_set.set_probabilities(np.ones((num,), dtype=np.float64)/num)

    if isinstance(data_set, samp.discretization):
        data_set._output_probability_set = s_set
        data_set.set_io_ptr(globalize=False)
    return s_set
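
A minimal usage sketch of the function above; the sample values are illustrative:

import numpy as np
import bet.sample as samp

out_set = samp.sample_set(2)
out_set.set_values(np.random.random((100, 2)))

# each of the 100 Voronoi cells receives probability 1/100
s_set = uniform_partition_uniform_distribution_data_samples(out_set)
print(s_set.get_probabilities().sum())  # ~1.0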
Example #22
def pick_cfd_points(input_set, radii_vec):
    r"""
    Pick 2*input_dim points, for each center, for a centered finite difference
    gradient approximation.  The centers are not needed for the CFD gradient
    approximation; they are returned for consistency with the other methods and
    because of the common need to have not just the gradient but also the QoI
    value at the centers in adaptive sampling algorithms.  The points are
    returned in the order: centers, followed by the cluster around the first
    center, then the cluster around the second center, and so on.
    
    :param input_set: The input sample set.  Make sure the attribute _values is
        not None
    :type input_set: :class:`~bet.sample.sample_set`
    :param radii_vec: The radius of the stencil, along each axis
    :type radii_vec: :class:`numpy.ndarray` of shape (input_dim,)
    
    :rtype: :class:`~bet.sample.sample_set`
    :returns: Centers and clusters of samples near each center (values are 
        :class:`numpy.ndarray` of shape ((``num_close+1``)*``num_centers``,
        ``input_dim``))
    
    """
    if input_set._values is None:
        raise ValueError("You must have values to use this method.")
    input_dim = input_set.get_dim()
    centers = input_set.get_values()
    num_centers = centers.shape[0]
    samples = np.repeat(centers, 2 * input_dim, axis=0)
    radii_vec = util.fix_dimensions_vector(radii_vec)

    # Construct a [num_centers*2*input_dim, input_dim] array that
    # translates the centers to the CFD points
    ident = np.eye(input_dim) * radii_vec
    translate = np.tile(np.append(ident, -ident, axis=0), (num_centers, 1))
    samples = samples + translate

    cluster_set = sample.sample_set(input_dim)
    if input_set.get_domain() is not None:
        cluster_set.set_domain(input_set.get_domain())
    cluster_set.set_values(centers)
    cluster_set.append_values(samples)
    return cluster_set
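
A short sketch of the stencil this builds, with illustrative sizes: for num_centers centers in input_dim dimensions the returned set holds num_centers*(2*input_dim + 1) points, centers first:

import numpy as np
import bet.sample as sample

centers_set = sample.sample_set(2)
centers_set.set_values(np.random.random((5, 2)))  # 5 centers in 2-D

cluster_set = pick_cfd_points(centers_set, np.array([0.01, 0.01]))
# 5 centers + 5 * (2 * 2) CFD stencil points = 25 values
print(cluster_set.get_values().shape)  # (25, 2)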
Example #23
    def setUp(self):
        """
        Set up problem.
        """
        emulated_input_samples = sample.sample_set(1)
        emulated_input_samples.set_domain(np.array([[0.0, 1.0]]))

        num_samples = 1000

        domain = emulated_input_samples.get_domain()
        emulated_input_samples.set_values_local(
            np.linspace(domain[0][0], domain[0][1], num_samples + 1))

        num_local = emulated_input_samples.get_values_local().shape[0]
        emulated_input_samples.set_probabilities_local(
            1.0 / float(comm.size) * (1.0 / float(num_local)) *
            np.ones((num_local,)))

        emulated_input_samples.check_num()

        self.samples = emulated_input_samples
Example #24
    def setUp(self):
        param_ref = np.array([0.5])
        Q_ref = linear_model3(param_ref)

        sampler = bsam.sampler(linear_model3)
        input_samples = sample.sample_set(1)
        input_samples.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
        input_samples = sampler.random_sample_set(
            'random', input_samples, num_samples=1E2)
        disc = sampler.compute_QoI_and_create_discretization(input_samples,
                                                             globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=Q_ref, rect_scale=0.5)
        num = disc.check_nums()
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 1)))
        jac = np.zeros((num, 1, 1))
        jac[:, :, :] = np.array([[0.506]]).transpose()

        disc._input_sample_set.set_jacobians(jac)
        self.disc = disc
Example #25
    def setUp(self):
        # create 1-1 map
        self.input_domain1 = np.column_stack((np.zeros((1,)), np.ones((1,))))

        def map_1t1(x):
            return np.sin(x)
        # create 3-1 map
        self.input_domain3 = np.column_stack((np.zeros((3,)), np.ones((3,))))

        def map_3t1(x):
            return np.sum(x, 1)
        # create 3-2 map

        def map_3t2(x):
            return np.vstack(([x[:, 0]+x[:, 1], x[:, 2]])).transpose()
        # create 10-4 map
        self.input_domain10 = np.column_stack(
            (np.zeros((10,)), np.ones((10,))))

        def map_10t4(x):
            x1 = x[:, 0] + x[:, 1]
            x2 = x[:, 2] + x[:, 3]
            x3 = x[:, 4] + x[:, 5]
            x4 = np.sum(x[:, [6, 7, 8, 9]], 1)
            return np.vstack([x1, x2, x3, x4]).transpose()
        num_samples = 100
        self.savefiles = ["11t11", "1t1", "3to1", "3to2", "10to4"]
        self.models = [map_1t1, map_1t1, map_3t1, map_3t2, map_10t4]
        self.samplers = []
        for model in self.models:
            self.samplers.append(bsam.sampler(model, num_samples))

        self.input_dim1 = 1
        self.input_dim2 = 2
        self.input_dim3 = 10

        self.input_sample_set1 = sample_set(self.input_dim1)
        self.input_sample_set2 = sample_set(self.input_dim2)
        self.input_sample_set3 = sample_set(self.input_dim3)

        self.input_sample_set4 = sample_set(self.input_domain1.shape[0])
        self.input_sample_set4.set_domain(self.input_domain1)

        self.input_sample_set5 = sample_set(self.input_domain3.shape[0])
        self.input_sample_set5.set_domain(self.input_domain3)

        self.input_sample_set6 = sample_set(self.input_domain10.shape[0])
        self.input_sample_set6.set_domain(self.input_domain10)
Example #26
    def setUp(self):
        """
        Set up problem.
        """
        input_samples = sample.sample_set(1)
        input_samples.set_domain(np.array([[0.0, 1.0]]))
        num_samples = 1000
        input_samples.set_values(np.linspace(input_samples.get_domain()[0, 0],
                                             input_samples.get_domain()[0, 1],
                                             num_samples+1))
        input_samples.set_probabilities((1.0/float(input_samples.get_values().shape[0])) *
                                        np.ones((input_samples.get_values().shape[0],)))
        input_samples._probabilities[0] = 0.0
        input_samples._probabilities[-1] *= 2.0

        self.data = input_samples
Example #27
def verify_random_sample_set_dimension(sampler, sample_type, input_dim,
                                     num_samples):

    np.random.seed(1)
    # recreate the samples
    if num_samples is None:
        num_samples = sampler.num_samples

    input_domain = np.repeat([[0, 1]], input_dim, axis=0)
    input_sample_set = sample_set(input_dim)
    input_sample_set.set_domain(input_domain)

    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)

    input_values = (input_right - input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                                                num_samples, 'center')
    elif sample_type in ("random", "r"):
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    input_sample_set.set_values(input_values)

    # reset the random seed
    np.random.seed(1)

    # create the sample set from the domain
    my_sample_set = sampler.random_sample_set(sample_type, input_dim,
                                                  num_samples=num_samples)

    # make sure that the samples are within the boundaries
    assert np.all(my_sample_set._values <= input_right)
    assert np.all(my_sample_set._values >= input_left)

    # compare the samples
    if comm.size == 1:
        nptest.assert_array_equal(input_sample_set._values,
                              my_sample_set._values)
Example #28
    def test_compute_QoI_and_create_discretization(self):
        """
        Test :meth:`bet.sampling.basicSampling.sampler.user_samples`
        for different QoI maps (1 to 1, 3 to 1, 3 to 2, and 10 to 4).
        """
        # create a list of different sets of samples
        list_of_samples = [np.ones((4, )), np.ones((4, 1)), np.ones((4, 3)),
                np.ones((4, 3)), np.ones((4, 10))]
        list_of_dims = [1, 1, 3, 3, 10]
        
        list_of_sample_sets = [None]*len(list_of_samples)

        for i, array in enumerate(list_of_samples):
            list_of_sample_sets[i] = sample_set(list_of_dims[i])
            list_of_sample_sets[i].set_values(array)

        test_list = zip(self.models, self.samplers, list_of_sample_sets, 
                self.savefiles)

        for model, sampler, input_sample_set, savefile in test_list:
            verify_compute_QoI_and_create_discretization(model, sampler,
                input_sample_set, savefile)
Example #29
    def setUp(self):
        """
        Setup map.
        """
        param_ref = np.array([0.5, 0.5, 0.5])
        Q_ref = linear_model1(param_ref)
        
        sampler = bsam.sampler(linear_model1)
        input_samples = sample.sample_set(3)
        input_samples.set_domain(np.repeat([[0.0, 1.0]], 3, axis=0))
        input_samples = sampler.random_sample_set('random', input_samples, num_samples=1E2)
        disc = sampler.compute_QoI_and_create_discretization(input_samples, 
                                                             globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=Q_ref, rect_scale=0.5)
        num = disc.check_nums()
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 2)))
        jac = np.zeros((num, 2, 3))
        jac[:, :, :] = np.array([[0.506, 0.463], [0.253, 0.918],
                                 [0.085, 0.496]]).transpose()

        disc._input_sample_set.set_jacobians(jac)
        self.sur = surrogates.piecewise_polynomial_surrogate(disc)
Example #30
def pick_ffd_points(input_set, radii_vec):
    r"""
    Pick input_dim points, for each center, for a forward finite
    difference gradient approximation.  The points are returned in the order:
    centers, followed by the cluster around the first center, then the cluster
    around the second center and so on.
    
    :param input_set: The input sample set.  Make sure the attribute _values is
        not None
    :type input_set: :class:`~bet.sample.sample_set`
    :param radii_vec: The radius of the stencil, along each axis
    :type radii_vec: :class:`numpy.ndarray` of shape (input_dim,)
    
    :rtype: :class:`~bet.sample.sample_set`
    :returns: Centers and clusters of samples near each center (values are 
        :class:`numpy.ndarray` of shape ((``num_close+1``)*``num_centers``,
        ``input_dim``))
    
    """
    if input_set._values is None:
        raise ValueError("You must have values to use this method.")
    input_dim = input_set.get_dim()
    centers = input_set.get_values()
    num_centers = centers.shape[0]
    samples = np.repeat(centers, input_dim, axis=0)
    radii_vec = util.fix_dimensions_vector(radii_vec)

    # Construct a [num_centers*(input_dim+1), input_dim] matrix that
    # translates the centers to the FFD points.
    translate = np.tile(np.eye(input_dim) * radii_vec, (num_centers, 1))
    samples = samples + translate

    cluster_set = sample.sample_set(input_dim)
    if input_set.get_domain() is not None:
        cluster_set.set_domain(input_set.get_domain())
    cluster_set.set_values(centers)
    cluster_set.append_values(samples)
    return cluster_set
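
Analogous to the CFD sketch above, the FFD stencil holds num_centers*(input_dim + 1) points in total; the sizes below are illustrative:

import numpy as np
import bet.sample as sample

centers_set = sample.sample_set(3)
centers_set.set_values(np.random.random((4, 3)))  # 4 centers in 3-D

cluster_set = pick_ffd_points(centers_set, 0.01 * np.ones(3))
# 4 centers + 4 * 3 forward-difference points = 16 values
print(cluster_set.get_values().shape)  # (16, 3)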
Example #31
    def setUp(self):
        """
        Setup maps
        """
        param_ref = np.array([0.5])
        Q_ref = linear_model3(param_ref)

        sampler = bsam.sampler(linear_model3)
        input_samples = sample.sample_set(1)
        input_samples.set_domain(np.repeat([[0.0, 1.0]], 1, axis=0))
        input_samples = sampler.random_sample_set('random',
                                                  input_samples,
                                                  num_samples=1E3)
        disc = sampler.compute_QoI_and_create_discretization(input_samples,
                                                             globalize=True)
        simpleFunP.regular_partition_uniform_distribution_rectangle_scaled(
            data_set=disc, Q_ref=Q_ref, rect_scale=0.5)
        num = disc.check_nums()
        disc._output_sample_set.set_error_estimates(0.01 * np.ones((num, 1)))
        jac = np.zeros((num, 1, 1))
        jac[:, :, :] = np.array([[0.506]]).transpose()

        disc._input_sample_set.set_jacobians(jac)
        self.sur = surrogates.piecewise_polynomial_surrogate(disc)
Example #32
import bet.calculateP.simpleFunP as sfun
import numpy as np
import scipy.io as sio
import bet.sample as sample

# Import "Truth"
mdat = sio.loadmat('../matfiles/Q_2D')
Q = mdat['Q']
Q_ref = mdat['Q_true']

# Import Data
points = mdat['points']
lam_domain = np.array([[0.07, .15], [0.1, 0.2]])

# Create input, output, and discretization from data read from file
input_sample_set = sample.sample_set(points.shape[0])
input_sample_set.set_values(points.transpose())
input_sample_set.set_domain(lam_domain)
print("Finished loading data")


def postprocess(station_nums, ref_num):

    filename = 'P_q' + str(station_nums[0] + 1) + '_q' + str(station_nums[1] +
                                                             1)
    if len(station_nums) == 3:
        filename += '_q' + str(station_nums[2] + 1)
    filename += '_ref_' + str(ref_num + 1)

    data = Q[:, station_nums]
    output_sample_set = sample.sample_set(data.shape[1])
Example #33
    def slice(self,
              dims=None):
        r"""
        Slices the left and right of the comparison.

        :param list dims: list of indices (dimensions) of sample set to include

        :rtype: :class:`~bet.sample.comparison`
        :returns: sliced comparison

        """
        slice_list = ['_values', '_values_local',
                      '_error_estimates', '_error_estimates_local',
                      ]
        slice_list2 = ['_jacobians', '_jacobians_local']

        comp_ss = samp.sample_set(len(dims))
        left_ss = samp.sample_set(len(dims))
        right_ss = samp.sample_set(len(dims))

        if self._comparison_sample_set._domain is not None:
            comp_ss.set_domain(self._comparison_sample_set._domain[dims, :])

        if self._left_sample_set._domain is not None:
            left_ss.set_domain(self._left_sample_set._domain[dims, :])
        if self._left_sample_set._reference_value is not None:
            left_ss.set_reference_value(
                self._left_sample_set._reference_value[dims])

        if self._right_sample_set._domain is not None:
            right_ss.set_domain(self._right_sample_set._domain[dims, :])
        if self._right_sample_set._reference_value is not None:
            right_ss.set_reference_value(
                self._right_sample_set._reference_value[dims])

        for obj in slice_list:
            val = getattr(self._left_sample_set, obj)
            if val is not None:
                setattr(left_ss, obj, val[:, dims])
            val = getattr(self._right_sample_set, obj)
            if val is not None:
                setattr(right_ss, obj, val[:, dims])
            val = getattr(self._comparison_sample_set, obj)
            if val is not None:
                setattr(comp_ss, obj, val[:, dims])
        for obj in slice_list2:
            val = getattr(self._left_sample_set, obj)
            if val is not None:
                nval = np.copy(val)
                nval = nval.take(dims, axis=1)
                nval = nval.take(dims, axis=2)
                setattr(left_ss, obj, nval)
            val = getattr(self._right_sample_set, obj)
            if val is not None:
                nval = np.copy(val)
                nval = nval.take(dims, axis=1)
                nval = nval.take(dims, axis=2)
                setattr(right_ss, obj, nval)

        comp = comparison(sample_set_left=left_ss,
                          sample_set_right=right_ss,
                          comparison_sample_set=comp_ss)
        # additional attributes to copy over here. TODO: maybe slice through
        return comp
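
A hedged usage sketch; comp below is assumed to be an existing comparison whose left and right sample sets have at least three dimensions:

# restrict the comparison to the first two dimensions; domains, values,
# error estimates, and Jacobians are all sliced accordingly
sliced_comp = comp.slice(dims=[0, 1])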
Example #34
    def compute_QoI_and_create_discretization(self,
                                              input_sample_set,
                                              savefile=None,
                                              globalize=True):
        """
        Samples the model at ``input_sample_set`` and saves the results.

        Note: There are many ways to generate samples on a regular grid in
        NumPy and other Python packages. Instead of reimplementing them here we
        provide a sampler that utilizes user-specified samples.

        :param input_sample_set: samples to evaluate the model at
        :type input_sample_set: :class:`~bet.sample.sample_set` with
            num_samples
        :param string savefile: filename to save samples and data
        :param bool globalize: Makes local variables global. 

        :rtype: :class:`~bet.sample.discretization` 
        :returns: :class:`~bet.sample.discretization` object which contains
            input and output of ``num_samples`` 

        """

        # Update the number of samples
        self.num_samples = input_sample_set.check_num()

        # Solve the model at the samples
        if input_sample_set._values_local is None:
            input_sample_set.global_to_local()

        local_output = self.lb_model(input_sample_set.get_values_local())

        if isinstance(local_output, np.ndarray):
            local_output_values = local_output
        elif isinstance(local_output, tuple):
            if len(local_output) == 1:
                local_output_values = local_output[0]
            elif len(local_output) == 2 and self.error_estimates:
                (local_output_values, local_output_ee) = local_output
            elif len(local_output) == 2 and self.jacobians:
                (local_output_values, local_output_jac) = local_output
            elif len(local_output) == 3:
                (local_output_values, local_output_ee, local_output_jac) = \
                    local_output
        else:
            raise bad_object("lb_model is not returning the proper type")

        # figure out the dimension of the output
        if len(local_output_values.shape) <= 1:
            output_dim = 1
        else:
            output_dim = local_output_values.shape[1]

        output_sample_set = sample.sample_set(output_dim)
        output_sample_set.set_values_local(local_output_values)
        lam_ref = input_sample_set._reference_value

        if lam_ref is not None:
            try:
                if not isinstance(lam_ref, collections.abc.Iterable):
                    lam_ref = np.array([lam_ref])
                Q_ref = self.lb_model(lam_ref)
                output_sample_set.set_reference_value(Q_ref)
            except ValueError:
                try:
                    msg = "Model not mapping reference value as expected."
                    msg += " Attempting reshape..."
                    logging.log(20, msg)
                    Q_ref = self.lb_model(lam_ref.reshape(1, -1))
                    output_sample_set.set_reference_value(Q_ref)
                except ValueError:
                    logging.log(20, 'Unable to map reference value.')

        if self.error_estimates:
            output_sample_set.set_error_estimates_local(local_output_ee)

        if self.jacobians:
            input_sample_set.set_jacobians_local(local_output_jac)

        if globalize:
            input_sample_set.local_to_global()
            output_sample_set.local_to_global()
        else:
            input_sample_set._values = None

        comm.barrier()

        discretization = sample.discretization(input_sample_set,
                                               output_sample_set)
        comm.barrier()

        mdat = dict()
        self.update_mdict(mdat)

        if savefile is not None:
            self.save(mdat, savefile, discretization, globalize=globalize)

        comm.barrier()

        return discretization
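
A minimal usage sketch of this method, assuming a vectorized model callable; my_model and the sample count are illustrative:

import numpy as np
import bet.sample as sample
import bet.sampling.basicSampling as bsam

def my_model(x):
    # toy QoI map summing the input coordinates
    return np.sum(x, axis=1)

sampler = bsam.sampler(my_model)
input_samples = sample.sample_set(2)
input_samples.set_domain(np.array([[0.0, 1.0], [0.0, 1.0]]))
input_samples = sampler.random_sample_set('random', input_samples,
                                          num_samples=100)
disc = sampler.compute_QoI_and_create_discretization(input_samples,
                                                     globalize=True)
print(disc.check_nums())  # 100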
Example #35
def verify_create_random_discretization(model, sampler, sample_type,
                                        input_domain, num_samples, savefile):

    np.random.seed(1)
    # recreate the samples
    if num_samples is None:
        num_samples = sampler.num_samples

    input_sample_set = sample_set(input_domain.shape[0])
    input_sample_set.set_domain(input_domain)

    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)

    input_values = (input_right - input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                                                num_samples, 'center')
    elif sample_type in ("random", "r"):
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    input_sample_set.set_values(input_values)

    # evaluate the model at the samples directly
    output_values = (model(input_sample_set._values))
    if len(output_values.shape) == 1:
        output_sample_set = sample_set(1)
    else:
        output_sample_set = sample_set(output_values.shape[1])
    output_sample_set.set_values(output_values)

    # reset the random seed
    np.random.seed(1)
    comm.barrier()
    # create the random discretization using a specified input domain
    my_discretization = sampler.create_random_discretization(
        sample_type,
        input_domain,
        savefile,
        num_samples=num_samples,
        globalize=True)
    #comm.barrier()
    my_num = my_discretization.check_nums()

    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 1:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
                                  my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
                                  my_discretization._output_sample_set._values)

    # did num_samples get updated?
    assert my_num == sampler.num_samples

    # did the file get correctly saved?
    saved_disc = bet.sample.load_discretization(savefile)

    # compare the samples
    nptest.assert_array_equal(my_discretization._input_sample_set.get_values(),
                              saved_disc._input_sample_set.get_values())
    # compare the data
    nptest.assert_array_equal(
        my_discretization._output_sample_set.get_values(),
        saved_disc._output_sample_set.get_values())

    # reset the random seed
    np.random.seed(1)

    my_sample_set = sample_set(input_domain.shape[0])
    my_sample_set.set_domain(input_domain)
    #comm.barrier()
    # create the random discretization using an initialized sample_set
    my_discretization = sampler.create_random_discretization(
        sample_type,
        my_sample_set,
        savefile,
        num_samples=num_samples,
        globalize=True)
    my_num = my_discretization.check_nums()

    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 1:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
                                  my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
                                  my_discretization._output_sample_set._values)

    # reset the random seed
    np.random.seed(1)
    # recreate the samples to test default choices with unit hypercube domain
    if num_samples is None:
        num_samples = sampler.num_samples

    my_dim = input_domain.shape[0]
    input_sample_set = sample_set(my_dim)
    input_sample_set.set_domain(np.repeat([[0.0, 1.0]], my_dim, axis=0))

    input_left = np.repeat([input_domain[:, 0]], num_samples, 0)
    input_right = np.repeat([input_domain[:, 1]], num_samples, 0)

    input_values = (input_right - input_left)
    if sample_type == "lhs":
        input_values = input_values * pyDOE.lhs(input_sample_set.get_dim(),
                                                num_samples, 'center')
    elif sample_type in ("random", "r"):
        input_values = input_values * np.random.random(input_left.shape)
    input_values = input_values + input_left
    input_sample_set.set_values(input_values)

    # reset random seed
    np.random.seed(1)
    comm.barrier()
    # create the random discretization using a specified input_dim
    my_discretization = sampler.create_random_discretization(
        sample_type, my_dim, savefile, num_samples=num_samples, globalize=True)
    #comm.barrier()
    my_num = my_discretization.check_nums()

    # make sure that the samples are within the boundaries
    assert np.all(my_discretization._input_sample_set._values <= input_right)
    assert np.all(my_discretization._input_sample_set._values >= input_left)

    if comm.size == 1:
        # compare the samples
        nptest.assert_array_equal(input_sample_set._values,
                                  my_discretization._input_sample_set._values)
        # compare the data
        nptest.assert_array_equal(output_sample_set._values,
                                  my_discretization._output_sample_set._values)
Example #36
def test_loadmat_init():
    """
    Tests :meth:`bet.sampling.adaptiveSampling.loadmat` and
    :meth:`bet.sampling.adaptiveSampling.sampler.init`.
    """
    np.random.seed(1)
    chain_length = 5

    mdat1 = {'num_samples': 50, 'chain_length': chain_length}
    mdat2 = {'num_samples': 60, 'chain_length': chain_length}
    model = "this is not a model"

    num_samples = np.array([50, 60])
    num_chains_pproc1, num_chains_pproc2 = np.ceil(
        num_samples / float(chain_length * comm.size)).astype('int')
    num_chains1, num_chains2 = comm.size * np.array(
        [num_chains_pproc1, num_chains_pproc2])
    num_samples1, num_samples2 = chain_length * np.array(
        [num_chains1, num_chains2])

    my_input1 = sample_set(1)
    my_input1.set_values(np.random.random((num_samples1, 1)))
    my_output1 = sample_set(1)
    my_output1.set_values(np.random.random((num_samples1, 1)))
    my_input2 = sample_set(1)
    my_input2.set_values(np.random.random((num_samples2, 1)))
    my_output2 = sample_set(1)
    my_output2.set_values(np.random.random((num_samples2, 1)))

    mdat1['num_chains'] = num_chains1
    mdat1['kern_old'] = np.random.random((num_chains1, ))
    mdat1['step_ratios'] = np.random.random((num_samples1, ))
    mdat2['num_chains'] = num_chains2
    mdat2['kern_old'] = np.random.random((num_chains2, ))
    mdat2['step_ratios'] = np.random.random((num_samples2, ))

    sio.savemat(os.path.join(local_path, 'testfile1'), mdat1)
    sio.savemat(os.path.join(local_path, 'testfile2'), mdat2)

    bet.sample.save_discretization(disc(my_input1, my_output1),
                                   os.path.join(local_path, 'testfile1'),
                                   globalize=True)
    bet.sample.save_discretization(disc(my_input2, my_output2),
                                   os.path.join(local_path, 'testfile2'),
                                   globalize=True)
    loaded_sampler1, discretization1, _, _ = asam.loadmat(
        os.path.join(local_path, 'testfile1'), hot_start=2)
    nptest.assert_array_equal(discretization1._input_sample_set.get_values(),
                              my_input1.get_values())
    nptest.assert_array_equal(discretization1._output_sample_set.get_values(),
                              my_output1.get_values())
    assert loaded_sampler1.num_samples == num_samples1
    assert loaded_sampler1.chain_length == chain_length
    assert loaded_sampler1.num_chains_pproc == num_chains_pproc1
    assert loaded_sampler1.num_chains == num_chains1
    nptest.assert_array_equal(
        np.repeat(np.arange(num_chains1), chain_length, 0),
        loaded_sampler1.sample_batch_no)
    assert loaded_sampler1.lb_model is None

    loaded_sampler2, discretization2, _, _ = asam.loadmat(
        os.path.join(local_path, 'testfile2'), lb_model=model, hot_start=2)
    nptest.assert_array_equal(discretization2._input_sample_set.get_values(),
                              my_input2.get_values())
    assert loaded_sampler2.num_samples == num_samples2
    assert loaded_sampler2.chain_length == chain_length
    assert loaded_sampler2.num_chains_pproc == num_chains_pproc2
    assert loaded_sampler2.num_chains == num_chains2
    nptest.assert_array_equal(
        np.repeat(np.arange(num_chains2), chain_length, 0),
        loaded_sampler2.sample_batch_no)
    nptest.assert_array_equal(discretization2._output_sample_set.get_values(),
                              my_output2.get_values())
    comm.barrier()
    if comm.rank == 0:
        if os.path.exists(os.path.join(local_path, 'testfile1.mat')):
            os.remove(os.path.join(local_path, 'testfile1.mat'))
        if os.path.exists(os.path.join(local_path, 'testfile2.mat')):
            os.remove(os.path.join(local_path, 'testfile2.mat'))
Example #37
def sample_prob(percentile, sample_set, sort=True, descending=False):
    """
    This calculates the highest/lowest probability samples whose probabilities
    sum to a given value.
    A new sample_set with the samples corresponding to these highest/lowest
    probability samples is returned along with the number of samples and
    the indices.
    This uses :meth:`~bet.postProcess.sort_by_rho`.
    The ``descending`` flag determines whether the highest or lowest
    probability samples are selected.

    :param percentile: ratio of highest/lowest probability samples to select
    :type percentile: float
    :param sample_set: Object containing samples and probabilities
    :type sample_set: :class:`~bet.sample.sample_set_base` or
        :class:`~bet.sample.discretization`
    :param bool sort: Flag whether or not to sort
    :param bool descending: Flag order of sorting

    :rtype: tuple
    :returns: (num_samples, sample_set_out, indices) where ``sample_set_out``
        is a :class:`~bet.sample.sample_set` or
        :class:`~bet.sample.discretization` containing the selected samples
        and ``indices`` is a :class:`numpy.ndarray` of shape (num_samples,)
        of sorting indices

    """
    if isinstance(sample_set, sample.discretization):
        samples = sample_set._input_sample_set.get_values()
        P_samples = sample_set._input_sample_set.get_probabilities()
        lam_vol = sample_set._input_sample_set.get_volumes()
        data = sample_set._output_sample_set.get_values()
    elif isinstance(sample_set, sample.sample_set_base):
        samples = sample_set.get_values()
        P_samples = sample_set.get_probabilities()
        lam_vol = sample_set.get_volumes()
        data = None
    else:
        raise bad_object("Improper sample object")

    # default ordering if no sort is requested (otherwise ``indices`` would
    # be undefined in the descending branch and the return statement)
    indices = np.arange(samples.shape[0])
    if sort:
        (sample_set, indices) = sort_by_rho(sample_set)
        if isinstance(sample_set, sample.discretization):
            samples = sample_set._input_sample_set.get_values()
            P_samples = sample_set._input_sample_set.get_probabilities()
            lam_vol = sample_set._input_sample_set.get_volumes()
            data = sample_set._output_sample_set.get_values()
        elif isinstance(sample_set, sample.sample_set_base):
            samples = sample_set.get_values()
            P_samples = sample_set.get_probabilities()
            lam_vol = sample_set.get_volumes()
            data = None
    if descending:
        P_samples = P_samples[::-1]
        samples = samples[::-1]
        if lam_vol is not None:
            lam_vol = lam_vol[::-1]
        if data is not None:
            data = data[::-1]
        indices = indices[::-1]

    P_sum = np.cumsum(P_samples)
    num_samples = np.sum(np.logical_and(0.0 < P_sum, P_sum <= percentile))
    P_samples = P_samples[0:num_samples]
    samples = samples[0:num_samples, :]
    if lam_vol is not None:
        lam_vol = lam_vol[0:num_samples]
    if data is not None:
        if len(data.shape) == 1:
            data = np.expand_dims(data, axis=1)
        data = data[0:num_samples, :]

    if isinstance(sample_set, sample.discretization):
        samples_out = sample.sample_set(sample_set._input_sample_set.get_dim())
        data_out = sample.sample_set(sample_set._output_sample_set.get_dim())
        sample_set_out = sample.discretization(samples_out, data_out)
        sample_set_out._input_sample_set.set_values(samples)
        sample_set_out._input_sample_set.set_probabilities(P_samples)
        sample_set_out._input_sample_set.set_volumes(lam_vol)
        sample_set_out._output_sample_set.set_values(data)
    else:
        sample_set_out = sample.sample_set(sample_set.get_dim())
        sample_set_out.set_values(samples)
        sample_set_out.set_probabilities(P_samples)
        sample_set_out.set_volumes(lam_vol)

    return (num_samples, sample_set_out, indices[0:num_samples])
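
A minimal usage sketch for the function above, assuming it and ``sort_by_rho`` are exposed via ``bet.postProcess.postTools``; the toy values are hypothetical:

import numpy as np
import bet.sample as sample
import bet.postProcess.postTools as postTools

# Toy 1-D sample set with uniform probabilities and volumes
s_set = sample.sample_set(1)
s_set.set_values(np.linspace(0.0, 1.0, 10).reshape(-1, 1))
s_set.set_probabilities(np.ones(10) / 10.0)
s_set.set_volumes(np.ones(10) / 10.0)

# Keep the highest-probability samples whose probabilities sum to <= 0.5
(num, s_out, idx) = postTools.sample_prob(0.5, s_set, sort=True)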
Example #38
def sort_by_rho(sample_set):
    """
    This sorts the samples within the sample_set by probability density.
    If a discretization object is given, then the QoI data is also sorted
    to maintain the correspondence.
    Any volumes present in the input space (or just the sample object)
    are also sorted.

    :param sample_set: Object containing samples and probabilities
    :type sample_set: :class:`~bet.sample.sample_set_base` or
        :class:`~bet.sample.discretization`

    :rtype: tuple
    :returns: (sample_set_out, indices) where ``sample_set_out`` is a
        :class:`~bet.sample.sample_set` or
        :class:`~bet.sample.discretization` containing the sorted samples
        and ``indices`` is a :class:`numpy.ndarray` of shape (num_samples,)
        of sorting indices

    """
    if isinstance(sample_set, sample.discretization):
        samples = sample_set._input_sample_set.get_values()
        P_samples = sample_set._input_sample_set.get_probabilities()
        lam_vol = sample_set._input_sample_set.get_volumes()
        data = sample_set._output_sample_set.get_values()
    elif isinstance(sample_set, sample.sample_set_base):
        samples = sample_set.get_values()
        P_samples = sample_set.get_probabilities()
        lam_vol = sample_set.get_volumes()
        data = None
    else:
        raise bad_object("Improper sample object")

    nnz = np.sum(P_samples > 0)
    if lam_vol is None:
        indices = np.argsort(P_samples)[::-1][0:nnz]
    else:
        indices = np.argsort(P_samples / lam_vol)[::-1][0:nnz]
    P_samples = P_samples[indices]
    samples = samples[indices, :]
    if lam_vol is not None:
        lam_vol = lam_vol[indices]
    if data is not None:
        data = data[indices, :]

    if isinstance(sample_set, sample.discretization):
        samples_out = sample.sample_set(sample_set._input_sample_set.get_dim())
        data_out = sample.sample_set(sample_set._output_sample_set.get_dim())
        sample_set_out = sample.discretization(samples_out, data_out)
        sample_set_out._input_sample_set.set_values(samples)
        sample_set_out._input_sample_set.set_probabilities(P_samples)
        sample_set_out._input_sample_set.set_volumes(lam_vol)
        sample_set_out._output_sample_set.set_values(data)
    else:
        sample_set_out = sample.sample_set(sample_set.get_dim())
        sample_set_out.set_values(samples)
        sample_set_out.set_probabilities(P_samples)
        sample_set_out.set_volumes(lam_vol)

    return (sample_set_out, indices)
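
A short sketch of the ordering above (hypothetical values); note that when volumes are present the sort key is the density ``P/V``, and zero-probability samples are dropped:

import numpy as np
import bet.sample as sample
import bet.postProcess.postTools as postTools

s_set = sample.sample_set(2)
s_set.set_values(np.random.random((5, 2)))
s_set.set_probabilities(np.array([0.1, 0.4, 0.2, 0.3, 0.0]))
s_set.set_volumes(np.ones(5) / 5.0)

# Returns the 4 nonzero-probability samples ordered by descending P/V
(sorted_set, indices) = postTools.sort_by_rho(s_set)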
Example #39
    def generalized_chains(self,
                           input_obj,
                           t_set,
                           kern,
                           savefile,
                           initial_sample_type="random",
                           criterion='center',
                           hot_start=0):
        """
        Basic adaptive sampling algorithm using generalized chains.

        .. todo::

            Test HOTSTART from parallel files using different num proc

        :param string initial_sample_type: type of initial sample: random (or
            r), latin hypercube (lhs), or space-filling curve (TBD)
        :param input_obj: Either a :class:`bet.sample.sample_set` object for an
            input space, an array of min and max bounds for the input values
            with ``min = input_domain[:, 0]`` and ``max = input_domain[:, 1]``,
            or the dimension of an input space
        :type input_obj: :class:`~bet.sample.sample_set`,
            :class:`numpy.ndarray` of shape (ndim, 2), or ``int``
        :param t_set: method for creating new parameter steps, using a step
            size based on the parameter domain size
        :type t_set: :class:`bet.sampling.adaptiveSampling.transition_set`
        :param kern: functional that acts on the data used to
            determine the proposed change to the ``step_size``
        :type kern: :class:`~bet.sampling.adaptiveSampling.kernel`
        :param string savefile: filename to save samples and data
        :param int hot_start: Flag whether or not to hot start the sampling
            chains from a previous set of chains. Note that ``num_chains`` must
            be the same, but ``num_chains_pproc`` need not be the same. 0 -
            cold start, 1 - hot start from an uncompleted run, 2 - hot start
            from a finished run
        :param string criterion: latin hypercube criterion see 
            `PyDOE <http://pythonhosted.org/pyDOE/randomized.html>`_
        
        :rtype: tuple
        :returns: (``discretization``, ``all_step_ratios``) where
            ``discretization`` is a :class:`~bet.sample.discretization` object
            containing ``num_samples`` samples and ``all_step_ratios`` is a
            :class:`numpy.ndarray` of shape ``(num_chains, chain_length)``
        
        """

        # Calculate step_size
        max_ratio = t_set.max_ratio
        min_ratio = t_set.min_ratio

        if not hot_start:
            logging.info("COLD START")
            step_ratio = t_set.init_ratio * np.ones(self.num_chains_pproc)

            # Initialize the first batch of N samples (possibly taken from a
            # latin hypercube/space-filling curve to fully explore the
            # parameter space - not necessarily random). Call these
            # samples_old.
            disc_old = super(sampler, self).create_random_discretization(
                initial_sample_type,
                input_obj,
                savefile,
                self.num_chains,
                criterion,
                globalize=False)
            self.num_samples = self.chain_length * self.num_chains
            comm.Barrier()

            # populate local values
            #disc_old._input_sample_set.global_to_local()
            #disc_old._output_sample_set.global_to_local()
            input_old = disc_old._input_sample_set.copy()

            disc = disc_old.copy()
            all_step_ratios = step_ratio

            (kern_old, proposal) = kern.delta_step(disc_old.\
                    _output_sample_set.get_values_local(), None)

            start_ind = 1

        if hot_start:
            # LOAD FILES
            _, disc, all_step_ratios, kern_old = loadmat(
                savefile,
                lb_model=None,
                hot_start=hot_start,
                num_chains=self.num_chains)
            # Make sure arrays are localized; from here on out we will only
            # operate on _local_values.
            # Set mdat, step_ratio, input_old, start_ind appropriately
            step_ratio = all_step_ratios[-self.num_chains_pproc:]
            input_old = sample.sample_set(disc._input_sample_set.get_dim())
            input_old.set_domain(disc._input_sample_set.get_domain())
            input_old.set_values_local(disc._input_sample_set.\
                    get_values_local()[-self.num_chains_pproc:, :])

            # Determine how many batches have been run
            start_ind = disc._input_sample_set.get_values_local().\
                    shape[0]/self.num_chains_pproc

        mdat = dict()
        self.update_mdict(mdat)
        input_old.update_bounds_local()

        for batch in xrange(start_ind, self.chain_length):
            # For each of N samples_old, create N new parameter samples using
            # transition set and step_ratio. Call these samples input_new.
            input_new = t_set.step(step_ratio, input_old)

            # Solve the model for the input_new.
            output_new_values = self.lb_model(input_new.get_values_local())

            # Make some decision about changing step_size(k).  There are
            # multiple ways to do this.
            # Determine step size
            (kern_old, proposal) = kern.delta_step(output_new_values, kern_old)
            step_ratio = proposal * step_ratio
            # Is the ratio greater than max?
            step_ratio[step_ratio > max_ratio] = max_ratio
            # Is the ratio less than min?
            step_ratio[step_ratio < min_ratio] = min_ratio

            # Save and export concatenated arrays
            if self.chain_length < 4:
                pass
            elif comm.rank == 0 and (batch + 1) % (self.chain_length / 4) == 0:
                logging.info("Current chain length: "+\
                            str(batch+1)+"/"+str(self.chain_length))
            disc._input_sample_set.append_values_local(input_new.\
                    get_values_local())
            disc._output_sample_set.append_values_local(output_new_values)
            all_step_ratios = np.concatenate((all_step_ratios, step_ratio))
            mdat['step_ratios'] = all_step_ratios
            mdat['kern_old'] = kern_old

            super(sampler, self).save(mdat, savefile, disc, globalize=False)
            input_old = input_new

        # collect everything
        disc._input_sample_set.update_bounds_local()
        #disc._input_sample_set.local_to_global()
        #disc._output_sample_set.local_to_global()

        MYall_step_ratios = np.copy(all_step_ratios)
        # ``all_step_ratios`` is np.ndarray of shape (num_chains,
        # chain_length)
        all_step_ratios = util.get_global_values(MYall_step_ratios,
                                                 shape=(self.num_samples, ))
        all_step_ratios = np.reshape(all_step_ratios,
                                     (self.num_chains, self.chain_length), 'F')

        # save everything
        mdat['step_ratios'] = all_step_ratios
        mdat['kern_old'] = util.get_global_values(kern_old,
                                                  shape=(self.num_chains, ))
        super(sampler, self).save(mdat, savefile, disc, globalize=True)

        return (disc, all_step_ratios)
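
A hypothetical driver for the method above, sketched under the assumption that ``transition_set(init_ratio, min_ratio, max_ratio)``, ``rhoD_kernel(maximum, rho_D)``, and ``sampler(num_samples, chain_length, lb_model)`` are the constructors in ``bet.sampling.adaptiveSampling``:

import numpy as np
import bet.sampling.adaptiveSampling as asam

def lb_model(inputs):
    # toy model: sum the two input coordinates
    return np.sum(inputs, axis=1, keepdims=True)

# Indicator density on the data space (cf. the rho_D in Example #40)
Q_ref, bin_size = np.array([1.0]), np.array([0.2])
def rho_D(outputs):
    inside = np.all(np.abs(outputs - Q_ref) <= 0.5 * bin_size, axis=1)
    return inside.astype('float64') / np.product(bin_size)

t_set = asam.transition_set(0.5, 0.5 ** 5, 1.0)  # init, min, max step ratios
kern = asam.rhoD_kernel(1.0 / np.product(bin_size), rho_D)
my_sampler = asam.sampler(1000, 100, lb_model)   # 10 chains of length 100
(disc, all_step_ratios) = my_sampler.generalized_chains(
    np.array([[0.0, 1.0], [0.0, 1.0]]), t_set, kern, 'adaptive_run')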
Example #40
def rho_D(outputs):
    rho_left = np.repeat([Q_ref - .5 * bin_size], outputs.shape[0], 0)
    rho_right = np.repeat([Q_ref + .5 * bin_size], outputs.shape[0], 0)
    rho_left = np.all(np.greater_equal(outputs, rho_left), axis=1)
    rho_right = np.all(np.less_equal(outputs, rho_right), axis=1)
    inside = np.logical_and(rho_left, rho_right)
    max_values = np.repeat(maximum, outputs.shape[0], 0)
    return inside.astype('float64') * max_values


# Read in points_ref and plot results
ref_sample = mdat['points_true']
ref_sample = ref_sample[:, 14]

# Create input, output, and discretization from data read from file
input_sample_set = sample.sample_set(points.shape[0])
input_sample_set.set_values(points.transpose())
input_sample_set.set_domain(param_domain)
output_sample_set = sample.sample_set(Q.shape[1])
output_sample_set.set_values(Q)
my_disc = sample.discretization(input_sample_set, output_sample_set)

# Show the samples in the parameter space
pDom.scatter_rhoD(my_disc, rho_D=rho_D, ref_sample=ref_sample, io_flag='input')
# Show the corresponding samples in the data space
pDom.scatter_rhoD(output_sample_set,
                  rho_D=rho_D,
                  ref_sample=Q_ref,
                  io_flag='output')

# Show multiple data domains that correspond with the convex hull of samples in
Example #41
    def generate_for_input_set(self, input_sample_set, order=0):
        """
        Generates a surrogate discretization based on the input discretization,
        for a user-defined input sample set. The output sample set values
        and error estimates are piecewise polynomially defined over input sample
        set cells from the input discretization. For order 0, both are piecewise
        constant. For order 1, values are piecewise linear (assuming Jacobians
        exist), and error estimates are piecewise constant.

        :param input_sample_set: input sample set for surrogate discretization
        :type input_sample_set: :class:`~bet.sample.sample_set_base`
        :param order: Polynomial order
        :type order: int

        :rtype: :class:`~bet.sample.discretization`
        :returns: discretization defining the surrogate model

        """
        # Check inputs
        if order not in [0, 1]:
            msg = "Order must be 0 or 1."
            raise calculateError.wrong_argument_type(msg)
        input_sample_set.check_num()
        if input_sample_set._dim != self.input_disc._input_sample_set._dim:
            msg = "Dimensions of input sets are not equal."
            raise sample.dim_not_matching(msg)

        # Give properties from input discretization.
        if input_sample_set._domain is None:
            if self.input_disc._input_sample_set._domain is not None:
                input_sample_set.set_domain(self.input_disc.\
                        _input_sample_set._domain)
        if input_sample_set._p_norm is None:
            if self.input_disc._input_sample_set._p_norm is not None:
                input_sample_set.set_p_norm(self.input_disc.\
                        _input_sample_set._p_norm)

        # Set up a dummy discretization to get pointers
        # Assumes Voronoi sample set for now
        output_sample_set = sample.sample_set(self.input_disc.\
                _output_sample_set._dim)
        self.dummy_disc = self.input_disc.copy()
        self.dummy_disc.set_emulated_input_sample_set(input_sample_set)
        self.dummy_disc.set_emulated_ii_ptr(globalize=False)

        if order == 0:
            # define new values based on piecewise constants
            new_values_local = self.input_disc._output_sample_set.\
                    _values[self.dummy_disc._emulated_ii_ptr_local]
            output_sample_set.set_values_local(new_values_local)
        elif order == 1:
            # define new values based on piecewise linears using Jacobians
            if self.input_disc._input_sample_set._jacobians is None:
                if self.input_disc._input_sample_set._jacobians_local is None:
                    msg = "The input discretization must"
                    msg += " have jacobians defined."
                    raise calculateError.wrong_argument_type(msg)
                else:
                    self.input_disc._input_sample_set.local_to_global()

            jac_local = self.input_disc._input_sample_set._jacobians[\
                    self.dummy_disc._emulated_ii_ptr_local]
            diff_local = self.input_disc._input_sample_set._values[\
                    self.dummy_disc._emulated_ii_ptr_local] - \
                    input_sample_set._values_local
            new_values_local = self.input_disc._output_sample_set._values[\
                    self.dummy_disc._emulated_ii_ptr_local]
            new_values_local += np.einsum('ijk,ik->ij', jac_local, diff_local)
            output_sample_set.set_values_local(new_values_local)

        # if they exist, define error estimates with piecewise constants
        if self.input_disc._output_sample_set._error_estimates is not None:
            new_ee = self.input_disc._output_sample_set._error_estimates[\
                    self.dummy_disc._emulated_ii_ptr_local]
            output_sample_set.set_error_estimates_local(new_ee)
        # create discretization object for the surrogate
        self.surrogate_discretization = sample.discretization(input_sample_set\
                =input_sample_set, output_sample_set=output_sample_set,
                output_probability_set=self.input_disc._output_probability_set)
        return self.surrogate_discretization
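
The order-1 branch above is a first-order (Jacobian-based) update around each input cell center; a standalone numpy illustration of the ``einsum`` step, with all names and shapes hypothetical:

import numpy as np

# 3 emulated points, 2 outputs, 2 inputs
jac_local = np.random.random((3, 2, 2))   # Jacobian at each point's center
diff_local = np.random.random((3, 2))     # displacement used by the update
center_vals = np.random.random((3, 2))    # model output at the cell centers

# Batched matrix-vector product: for each point i, jac_local[i].dot(diff[i])
surrogate_vals = center_vals + np.einsum('ijk,ik->ij', jac_local, diff_local)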
Example #42
def calculate_gradients_cfd(cluster_discretization, normalize=True):
    """
    Approximate gradient vectors at ``num_centers = centers.shape[0]`` points
    in the parameter space for each QoI map.  THIS METHOD IS DEPENDENT
    ON USING :meth:`~bet.sensitivity.gradients.pick_cfd_points` TO CHOOSE
    SAMPLES FOR THE CFD STENCIL AROUND EACH CENTER.  THE ORDERING MATTERS.
    
    :param cluster_discretization: Must contain input and output values for the
        sample clusters.
    :type cluster_discretization: :class:`~bet.sample.discretization`
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    
    :rtype: :class:`~bet.sample.discretization`
    :returns: A new :class:`~bet.sample.discretization` that contains only the
        centers of the clusters and their associated ``_jacobians``, which are
        a tensor representation of the gradient vectors of each QoI map at
        each center, a :class:`numpy.ndarray` of shape (num_samples,
        output_dim, input_dim)
    
    """
    if cluster_discretization._input_sample_set.get_values() is None \
            or cluster_discretization._output_sample_set.get_values() is None:
        raise ValueError("You must have values to use this method.")
    samples = cluster_discretization._input_sample_set.get_values()
    data = cluster_discretization._output_sample_set.get_values()

    input_dim = cluster_discretization._input_sample_set.get_dim()
    num_model_samples = cluster_discretization.check_nums()
    output_dim = cluster_discretization._output_sample_set.get_dim()

    num_centers = num_model_samples // (2 * input_dim + 1)

    # Find radii_vec from the first cluster of samples
    radii_vec = samples[num_centers:num_centers + input_dim, :] - samples[0, :]
    radii_vec = util.fix_dimensions_vector_2darray(radii_vec.diagonal())

    # Clean the data
    data = util.clean_data(data[num_centers:])
    gradient_tensor = np.zeros([num_centers, output_dim, input_dim])

    radii_vec = np.tile(np.repeat(radii_vec, output_dim, axis=1),
                        [num_centers, 1])

    # Construct indices for CFD gradient approximation
    inds = np.repeat(range(0, 2 * input_dim * num_centers, 2 * input_dim),
                     input_dim) + np.tile(range(0, input_dim), num_centers)
    inds = np.array([inds, inds + input_dim]).transpose()

    gradient_mat = (data[inds[:, 0]] - data[inds[:, 1]]) * (0.5 / radii_vec)

    # Reshape and organize
    gradient_tensor = np.reshape(gradient_mat.transpose(),
                                 [output_dim, input_dim, num_centers],
                                 order='F').transpose(2, 0, 1)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor / np.tile(
            norm_gradient_tensor, (input_dim, 1, 1)).transpose(1, 2, 0)

    center_input_sample_set = sample.sample_set(input_dim)
    center_input_sample_set.set_values(samples[:num_centers, :])
    if cluster_discretization._input_sample_set.get_domain() is not None:
        center_input_sample_set.set_domain(cluster_discretization.\
                _input_sample_set.get_domain())
    center_input_sample_set.set_jacobians(gradient_tensor)
    center_output_sample_set = sample.sample_set(output_dim)
    center_output_sample_set.set_values(data[:num_centers, :])
    if cluster_discretization._output_sample_set.get_domain() is not None:
        center_output_sample_set.set_domain(cluster_discretization.\
                _output_sample_set.get_domain())
    #center_output_sample_set.set_jacobians(gradient_tensor.transpose())
    center_discretization = sample.discretization(center_input_sample_set,
                                                  center_output_sample_set)
    return center_discretization
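
A usage sketch, assuming ``pick_cfd_points(input_set, radii_vec)`` from the same gradients module builds the ordered +/- radius stencil this method requires; the linear map ``Q`` is hypothetical:

import numpy as np
import bet.sample as sample
import bet.sensitivity.gradients as grad

input_dim, num_centers = 2, 10
centers = sample.sample_set(input_dim)
centers.set_values(np.random.random((num_centers, input_dim)))

# Stencil of 2*input_dim + 1 samples per center; the ordering matters
cluster_set = grad.pick_cfd_points(centers, 0.01 * np.ones(input_dim))

# Evaluate a toy linear model on the cluster and assemble a discretization
Q = np.random.random((3, input_dim))
output_set = sample.sample_set(3)
output_set.set_values(cluster_set.get_values().dot(Q.transpose()))
cluster_disc = sample.discretization(cluster_set, output_set)

center_disc = grad.calculate_gradients_cfd(cluster_disc)
jacobians = center_disc.get_input_sample_set().get_jacobians()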
Example #43
def calculate_gradients_rbf(cluster_discretization,
                            num_centers=None,
                            num_neighbors=None,
                            RBF=None,
                            ep=None,
                            normalize=True):
    r"""
    Approximate gradient vectors at ``num_centers = centers.shape[0]`` points
    in the parameter space for each QoI map using a radial basis function
    interpolation method.
    
    :param cluster_discretization: Must contain input and output values for the
        sample clusters.
    :type cluster_discretization: :class:`~bet.sample.discretization`
    :param int num_centers: The number of cluster centers.
    :param int num_neighbors: Number of nearest neighbors to use in gradient
        approximation. Default value is ``input_dim + 2``
    :param string RBF: Choice of radial basis function. Default is Gaussian
    :param float ep: Choice of shape parameter for radial basis function.
        Default value is 1.0
    :param boolean normalize:  If normalize is True, normalize each gradient
        vector
    
    :rtype: :class:`~bet.sample.discretization`
    :returns: A new :class:`~bet.sample.discretization` that contains only the
        centers of the clusters and their associated ``_jacobians``, which are
        a tensor representation of the gradient vectors of each QoI map at
        each center, a :class:`numpy.ndarray` of shape (num_samples,
        output_dim, input_dim)
    
    """
    if cluster_discretization._input_sample_set.get_values() is None \
            or cluster_discretization._output_sample_set.get_values() is None:
        raise ValueError("You must have values to use this method.")
    samples = cluster_discretization._input_sample_set.get_values()
    data = cluster_discretization._output_sample_set.get_values()

    input_dim = cluster_discretization._input_sample_set.get_dim()
    num_model_samples = cluster_discretization.check_nums()
    output_dim = cluster_discretization._output_sample_set.get_dim()

    if num_neighbors is None:
        num_neighbors = input_dim + 2
    if ep is None:
        ep = 1.0
    if RBF is None:
        RBF = 'Gaussian'

    # If num_centers is None we assume the user chose clusters of size
    # input_dim + 2
    if num_centers is None:
        num_centers = num_model_samples // (input_dim + 2)
    centers = samples[:num_centers, :]

    rbf_tensor = np.zeros([num_centers, num_model_samples, input_dim])
    gradient_tensor = np.zeros([num_centers, output_dim, input_dim])

    # For each center, interpolate the data using the rbf chosen and
    # then evaluate the partial derivative of that interpolant at the desired
    # point.
    for c in range(num_centers):
        # Find the k nearest neighbors and their distances to centers[c,:]
        [r, nearest] = cluster_discretization._input_sample_set.query(\
                centers[c, :], k=num_neighbors)
        r = np.tile(r, (input_dim, 1))

        # Compute the coordinate-wise differences between the center and
        # its nearest neighbors
        diffVec = (centers[c, :] - samples[nearest, :]).transpose()

        # Compute the l2 distances between pairs of nearest neighbors
        distMat = spatial.distance_matrix(samples[nearest, :],
                                          samples[nearest, :])

        # Solve for the rbf weights using interpolation conditions and
        # evaluate the partial derivatives
        rbf_mat_values = \
            np.linalg.solve(radial_basis_function(distMat, RBF),
            radial_basis_function_dxi(r, diffVec, RBF, ep) \
            .transpose()).transpose()

        # Construct the finite difference matrices
        rbf_tensor[c, nearest, :] = rbf_mat_values.transpose()

    gradient_tensor = \
        rbf_tensor.transpose(2, 0, 1).dot(data).transpose(1, 2, 0)

    if normalize:
        # Compute the norm of each vector
        norm_gradient_tensor = np.linalg.norm(gradient_tensor, ord=1, axis=2)

        # If it is a zero vector (has 0 norm), set norm=1, avoid divide by zero
        norm_gradient_tensor[norm_gradient_tensor == 0] = 1.0

        # Normalize each gradient vector
        gradient_tensor = gradient_tensor / np.tile(
            norm_gradient_tensor, (input_dim, 1, 1)).transpose(1, 2, 0)

    center_input_sample_set = sample.sample_set(input_dim)
    center_input_sample_set.set_values(samples[:num_centers, :])
    if cluster_discretization._input_sample_set.get_domain() is not None:
        center_input_sample_set.set_domain(cluster_discretization.\
                _input_sample_set.get_domain())
    center_input_sample_set.set_jacobians(gradient_tensor)
    center_output_sample_set = sample.sample_set(output_dim)
    center_output_sample_set.set_values(data[:num_centers, :])
    if cluster_discretization._output_sample_set.get_domain() is not None:
        center_output_sample_set.set_domain(cluster_discretization.\
                _output_sample_set.get_domain())
    #center_output_sample_set.set_jacobians(gradient_tensor.transpose())
    center_discretization = sample.discretization(center_input_sample_set,
                                                  center_output_sample_set)
    return center_discretization
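
A parallel sketch for the RBF variant, reusing ``sample_l1_ball`` from the same module to build clusters of ``input_dim + 1`` points per center; the map ``Q`` is again hypothetical:

import numpy as np
import bet.sample as sample
import bet.sensitivity.gradients as grad

input_dim, num_centers = 2, 10
centers = sample.sample_set(input_dim)
centers.set_values(np.random.random((num_centers, input_dim)))

# Clusters of input_dim + 1 points in an l1 ball around each center
cluster_set = grad.sample_l1_ball(centers, input_dim + 1, 0.01)

Q = np.random.random((4, input_dim))
output_set = sample.sample_set(4)
output_set.set_values(cluster_set.get_values().dot(Q.transpose()))
cluster_disc = sample.discretization(cluster_set, output_set)

center_disc = grad.calculate_gradients_rbf(cluster_disc, num_centers)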
Example #44
import bet.postProcess.plotDomains as plotD
import bet.postProcess.postTools as postTools
import bet.sample as samp

# Labels and descriptions of the uncertain parameters
labels = [
    'Source $y$ coordinate [L]', 'Source $x$ coordinate [L]',
    'Dispersivity x [L]', 'Flow Angle [degrees]', 'Contaminant flux [M/T]'
]

# Load data from files
# First obtain info on the parameter domain
parameter_domain = np.loadtxt("files/lam_domain.txt.gz")  # parameter domain
parameter_dim = parameter_domain.shape[0]
# Create input sample set
input_samples = samp.sample_set(parameter_dim)
input_samples.set_domain(parameter_domain)
input_samples.set_values(np.loadtxt("files/samples.txt.gz"))
input_samples.estimate_volume_mc()  # Use standard MC estimate of volumes
# Choose which QoI to use and create output sample set
QoI_indices_observe = np.array([0, 1, 2, 3])
output_samples = samp.sample_set(QoI_indices_observe.size)
output_samples.set_values(
    np.loadtxt("files/data.txt.gz")[:, QoI_indices_observe])

# Create discretization object
my_discretization = samp.discretization(input_sample_set=input_samples,
                                        output_sample_set=output_samples)

# Load the reference parameter and QoI values
param_ref = np.loadtxt("files/lam_ref.txt.gz")  # reference parameter set
Example #45
def sample_lp_ball(input_set, num_close, radius, p_num=2):
    r"""
    Pick ``num_close`` points in the Lp ball of radius ``radius`` around each
    point in the input space (the centers).  If this ball extends outside of
    the domain of the input space, we sample the intersection.

    :param input_set: The input sample set.  Make sure the attribute
        ``_values`` is not ``None``
    :type input_set: :class:`~bet.sample.sample_set`
    :param int num_close: Number of points in each cluster
    :param float radius: The radius of the Lp ball
    :param float p_num: :math:`0 < p \leq \infty`, p for the lp norm where
        infinity is ``numpy.inf``
    
    :rtype: :class:`~bet.sample.sample_set`
    :returns: Centers and clusters of samples near each center (values are 
        :class:`numpy.ndarray` of shape ((``num_close+1``)*``num_centers``,
        ``input_dim``))
    
    """
    if input_set.get_values() is None:
        raise ValueError("You must have values to use this method.")
    input_dim = input_set.get_dim()
    centers = input_set.get_values()
    num_centers = input_set.check_num()
    input_domain = input_set.get_domain()

    cluster_set = sample.sample_set(input_dim)
    if input_domain is not None:
        cluster_set.set_domain(input_domain)
    cluster_set.set_values(centers)

    for i in xrange(num_centers):
        in_bounds = 0
        inflate = 1.0
        while in_bounds < num_close:
            # sample uniformly
            new_cluster = lpsam.Lp_generalized_uniform(input_dim,
                                                       num_close * inflate,
                                                       p_num, radius,
                                                       centers[i, :])
            # check bounds
            if input_domain is not None:
                cluster_set.update_bounds(num_close * inflate)
                left = np.all(np.greater_equal(new_cluster, cluster_set._left),
                              axis=1)
                right = np.all(np.less_equal(new_cluster, cluster_set._right),
                               axis=1)
                inside = np.logical_and(left, right)
                in_bounds = np.sum(inside)
                new_cluster = new_cluster[inside, :]
                # increase inflate
                inflate *= 10.0
            else:
                in_bounds = num_close

        if in_bounds > num_close:
            new_cluster = new_cluster[:num_close, :]
        cluster_set.append_values(new_cluster)

    # reset bounds
    cluster_set._left = None
    cluster_set._right = None
    cluster_set._width = None
    return cluster_set
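
A small sketch of the function above, assuming it is exposed as ``bet.sensitivity.gradients.sample_lp_ball``:

import numpy as np
import bet.sample as sample
import bet.sensitivity.gradients as grad

centers = sample.sample_set(2)
centers.set_domain(np.array([[0.0, 1.0], [0.0, 1.0]]))
centers.set_values(np.random.random((5, 2)))

# 10 points in the l2 ball of radius 0.05 around each of the 5 centers;
# the returned values are the centers followed by their clusters
cluster_set = grad.sample_lp_ball(centers, num_close=10, radius=0.05, p_num=2)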
Example #46
import bet.postProcess.postTools as postTools
import bet.Comm as comm
import bet.sample as sample

# Set up the info for the spaces
input_dim = 5
output_dim = 10
num_samples = 1E5
num_centers = 10

# Let the map Q be a random matrix of size (output_dim, input_dim)
np.random.seed(0)
Q = np.random.random([output_dim, input_dim])

# Initialize some sample objects we will need
input_samples = sample.sample_set(input_dim)
output_samples = sample.sample_set(output_dim)

# Choose random samples in parameter space to solve the model
input_samples.set_values(
    np.random.uniform(0, 1, [np.int(num_samples), input_dim]))

# Make the MC assumption and compute the volumes of each voronoi cell
input_samples.estimate_volume_mc()

# Compute the output values with the map Q
output_samples.set_values(
    Q.dot(input_samples.get_values().transpose()).transpose())

# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *True* because we are using bin_ratio to
Example #47
def test_loadmat_parallel():
    """

    Tests :meth:`bet.sampling.basicSampling.loadmat`.

    """
    np.random.seed(1)
    mdat1 = {'num_samples': 10}
    mdat2 = {'num_samples': 20}
    model = "this is not a model"

    my_input1 = sample_set(1)
    my_input1.set_values_local(
        np.array_split(np.random.random((10, 1)), comm.size)[comm.rank])
    my_output1 = sample_set(1)
    my_output1.set_values_local(
        np.array_split(np.random.random((10, 1)), comm.size)[comm.rank])
    my_input2 = sample_set(1)
    my_input2.set_values_local(
        np.array_split(np.random.random((20, 1)), comm.size)[comm.rank])
    my_output2 = sample_set(1)
    my_output2.set_values_local(
        np.array_split(np.random.random((20, 1)), comm.size)[comm.rank])

    file_name1 = 'testfile1.mat'
    file_name2 = 'testfile2.mat'

    if comm.size > 1:
        local_file_name1 = os.path.join(
            os.path.dirname(file_name1),
            "proc{}_{}".format(comm.rank, os.path.basename(file_name1)))
        local_file_name2 = os.path.join(
            os.path.dirname(file_name2),
            "proc{}_{}".format(comm.rank, os.path.basename(file_name2)))
    else:
        local_file_name1 = file_name1
        local_file_name2 = file_name2

    sio.savemat(local_file_name1, mdat1)
    sio.savemat(local_file_name2, mdat2)
    comm.barrier()

    bet.sample.save_discretization(disc(my_input1, my_output1),
                                   file_name1,
                                   globalize=False)
    bet.sample.save_discretization(disc(my_input2, my_output2),
                                   file_name2,
                                   "NAME",
                                   globalize=False)

    (loaded_sampler1, discretization1) = bsam.loadmat(file_name1)
    nptest.assert_array_equal(discretization1._input_sample_set.get_values(),
                              my_input1.get_values())
    nptest.assert_array_equal(discretization1._output_sample_set.get_values(),
                              my_output1.get_values())
    assert loaded_sampler1.num_samples == 10
    assert loaded_sampler1.lb_model is None

    (loaded_sampler2, discretization2) = bsam.loadmat(file_name2,
                                                      disc_name="NAME",
                                                      model=model)
    nptest.assert_array_equal(discretization2._input_sample_set.get_values(),
                              my_input2.get_values())
    nptest.assert_array_equal(discretization2._output_sample_set.get_values(),
                              my_output2.get_values())

    assert loaded_sampler2.num_samples == 20
    assert loaded_sampler2.lb_model == model
    if comm.size == 1:
        os.remove(file_name1)
        os.remove(file_name2)
    else:
        os.remove(local_file_name1)
        os.remove(local_file_name2)
Example #48
    def setUp(self):
        """
        Set up problem.
        """
        # Create sample_set object for input_samples
        input_samples = sample.sample_set(4)

        input_samples.set_domain(np.array([[0.0, 1.0], [0.0, 1.0],
                                           [0.0, 1.0], [0.0, 1.0]]))
        input_samples.set_values(util.meshgrid_ndim(
            (np.linspace(input_samples.get_domain()[0, 0],
                         input_samples.get_domain()[0, 1], 3),
             np.linspace(input_samples.get_domain()[1, 0],
                         input_samples.get_domain()[1, 1], 3),
             np.linspace(input_samples.get_domain()[2, 0],
                         input_samples.get_domain()[2, 1], 3),
             np.linspace(input_samples.get_domain()[3, 0],
                         input_samples.get_domain()[3, 1], 3))))
        input_samples.set_probabilities(
            (1.0/float(input_samples.get_values().shape[0]))
            * np.ones((input_samples.get_values().shape[0],)))

        # Check that probabilities and values arrays have same number of entries
        input_samples.check_num()

        # Create sample_set object for output_samples
        output_samples = sample.sample_set(4)
        output_samples.set_values(input_samples.get_values()*3.0)
        output_samples.set_domain(3.0*input_samples.get_domain())

        self.disc = sample.discretization(input_samples, output_samples)

        self.filename = "testfigure"

        output_ref_datum = np.mean(output_samples.get_domain(), axis=1)

        bin_size = 0.15*(np.max(output_samples.get_domain(), axis=1) -
                         np.min(output_samples.get_domain(), axis=1))
        maximum = 1/np.product(bin_size)

        def ifun(outputs):
            """
            Indicator function.
            :param outputs: outputs
            :type outputs: :class:`numpy.ndarray` of shape (N, ndim)
            :rtype: :class:`numpy.ndarray` of shape (N,)
            :returns: 0 if outside of set or positive number if inside set
            """
            left = np.repeat([output_ref_datum-.5*bin_size],
                             outputs.shape[0], 0)
            right = np.repeat([output_ref_datum+.5*bin_size],
                              outputs.shape[0], 0)
            left = np.all(np.greater_equal(outputs, left), axis=1)
            right = np.all(np.less_equal(outputs, right), axis=1)
            inside = np.logical_and(left, right)
            max_values = np.repeat(maximum, outputs.shape[0], 0)
            return inside.astype('float64')*max_values

        self.rho_D = ifun
        self.lnums = [1, 2, 3]
        self.markers = []

        for m in Line2D.markers:
            try:
                if len(m) == 1 and m != ' ':
                    self.markers.append(m)
            except TypeError:
                pass

        self.colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')
Example #49
def regular_sample_set(input_obj, num_samples_per_dim=1):
    """
    Sampling algorithm for generating a regular grid of samples taken
    on the domain present with ``input_obj`` (a default unit hypercube
    is used if no domain has been specified)

    :param input_obj: object containing the dimension or domain to sample
        from: a :class:`~bet.sample.sample_set`, a domain given as a
        :class:`numpy.ndarray` of shape (dim, 2), or the dimension as an
        ``int``
    :type input_obj: :class:`~bet.sample.sample_set` or :class:`numpy.ndarray`
        of shape (dim, 2) or ``int``
    :param num_samples_per_dim: number of samples per dimension
    :type num_samples_per_dim: :class:`~numpy.ndarray` of dimension
        ``(input_sample_set._dim,)``

    :rtype: :class:`~bet.sample.sample_set`
    :returns: :class:`~bet.sample.sample_set` object containing the regular
        grid of input samples

    """
    # check to see what the input object is
    if isinstance(input_obj, sample.sample_set):
        input_sample_set = input_obj.copy()
    elif isinstance(input_obj, int):
        input_sample_set = sample.sample_set(input_obj)
    elif isinstance(input_obj, np.ndarray):
        input_sample_set = sample.sample_set(input_obj.shape[0])
        input_sample_set.set_domain(input_obj)
    else:
        raise bad_object("Improper sample object")

    # Create N samples
    dim = input_sample_set.get_dim()

    if not isinstance(num_samples_per_dim, collections.Iterable):
        num_samples_per_dim = num_samples_per_dim * np.ones((dim, ))
    if np.any(np.less_equal(num_samples_per_dim, 0)):
        warnings.warn('Warning: num_samples_per_dim must be greater than 0')

    num_samples = int(np.product(num_samples_per_dim))

    if input_sample_set.get_domain() is None:
        # create the domain
        input_domain = np.array([[0., 1.]] * dim)
        input_sample_set.set_domain(input_domain)
    else:
        input_domain = input_sample_set.get_domain()
    # update the bounds based on the number of samples
    input_values = np.zeros((num_samples, dim))

    vec_samples_dimension = np.empty((dim), dtype=object)
    for i in range(dim):
        bin_width = (input_domain[i, 1] - input_domain[i, 0]) / \
            np.float(num_samples_per_dim[i])
        vec_samples_dimension[i] = list(
            np.linspace(input_domain[i, 0] - 0.5 * bin_width,
                        input_domain[i, 1] + 0.5 * bin_width,
                        num_samples_per_dim[i] +
                        2))[1:int(num_samples_per_dim[i] + 1)]

    arrays_samples_dimension = np.meshgrid(
        *[vec_samples_dimension[i] for i in np.arange(0, dim)], indexing='ij')

    for i in range(dim):
        input_values[:, i:i + 1] = \
            np.vstack(arrays_samples_dimension[i].flat[:])

    input_sample_set.set_values(input_values)
    input_sample_set.global_to_local()

    return input_sample_set
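
A quick sketch, assuming the function above is exposed as ``bet.sampling.basicSampling.regular_sample_set``:

import numpy as np
import bet.sampling.basicSampling as bsam

# A 3 x 4 grid of cell-centered samples on [0, 1] x [0, 2]
domain = np.array([[0.0, 1.0], [0.0, 2.0]])
grid_set = bsam.regular_sample_set(domain, num_samples_per_dim=[3, 4])
print(grid_set.get_values().shape)  # (12, 2)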
Example #50
of all space-time locations (``indexstart = 0``, ``indexstop = 1000``), or,
we can check the QoI at a particular time (e.g., setting ``indexstart=0`` and
``indexstop = 20`` considers all the spatial QoI only at the first time step).

In general, ``indexstart`` can be any integer between 0 and 998, and
``indexstop`` must be at least 2 greater than ``indexstart`` (so between
2 and 1000, subject to the constraint ``indexstop`` :math:`\geq`
``indexstart + 2``), which ensures that we check at least a single pair
of QoI.
'''
indexstart = 0
indexstop = 20
qoiIndices = range(indexstart, indexstop)

# Initialize the necessary sample objects
input_samples = sample.sample_set(2)
output_samples = sample.sample_set(1000)

# Set the input sample values from the imported file
input_samples.set_values(matfile['samples'])

# Set the data from the imported file
output_samples.set_values(matfile['data'])

# Create the cluster discretization
cluster_discretization = sample.discretization(input_samples, output_samples)

# Calculate the gradient vectors at each of the 16 centers for each of the
# QoI maps
if fd_scheme.upper() in ['RBF']:
    center_discretization = grad.calculate_gradients_rbf(
Example #51
def random_sample_set(sample_type,
                      input_obj,
                      num_samples,
                      criterion='center',
                      globalize=True):
    """
    Sampling algorithm with two basic options

        * ``random`` (or ``r``) generates ``num_samples`` samples in
            ``lam_domain`` assuming a Lebesgue measure.
        * ``lhs`` generates a latin hypercube of samples.

    Note: This function is designed only for generalized rectangles and
    assumes a Lebesgue measure on the parameter space.

    :param string sample_type: type of sampling: random (or r),
        latin hypercube (lhs), regular grid (rg), or space-filling
        curve (TBD)
    :param input_obj: :class:`~bet.sample.sample_set` object containing
        the dimension or domain to sample from, a domain to sample from, or
        the dimension
    :type input_obj: :class:`~bet.sample.sample_set` or
        :class:`numpy.ndarray` of shape (dim, 2) or ``int``
    :param int num_samples: N, number of samples
    :param string criterion: latin hypercube criterion see
        `PyDOE <http://pythonhosted.org/pyDOE/randomized.html>`_
    :param bool globalize: Makes local variables global. Only applies if
        ``parallel==True``.

    :rtype: :class:`~bet.sample.sample_set`
    :returns: :class:`~bet.sample.sample_set` object which contains the
        ``num_samples`` input samples

    """

    # check to see what the input object is
    if isinstance(input_obj, sample.sample_set):
        input_sample_set = input_obj.copy()
    elif isinstance(input_obj, int):
        input_sample_set = sample.sample_set(input_obj)
    elif isinstance(input_obj, np.ndarray):
        input_sample_set = sample.sample_set(input_obj.shape[0])
        input_sample_set.set_domain(input_obj)
    else:
        raise bad_object("Improper sample object")

    # Create N samples
    dim = input_sample_set.get_dim()

    if input_sample_set.get_domain() is None:
        # create the domain
        input_domain = np.array([[0., 1.]] * dim)
        input_sample_set.set_domain(input_domain)

    if sample_type == "lhs":
        # update the bounds based on the number of samples
        input_sample_set.update_bounds(num_samples)
        input_values = np.copy(input_sample_set._width)
        input_values = input_values * lhs(dim, num_samples, criterion)
        input_values = input_values + input_sample_set._left
        input_sample_set.set_values_local(
            np.array_split(input_values, comm.size)[comm.rank])
    elif sample_type == "random" or "r":
        # define local number of samples
        num_samples_local = int((num_samples / comm.size) +
                                (comm.rank < num_samples % comm.size))
        # update the bounds based on the number of samples
        input_sample_set.update_bounds_local(num_samples_local)
        input_values_local = np.copy(input_sample_set._width_local)
        input_values_local = input_values_local * \
            np.random.random(input_values_local.shape)
        input_values_local = input_values_local + input_sample_set._left_local

        input_sample_set.set_values_local(input_values_local)

    comm.barrier()

    if globalize:
        input_sample_set.local_to_global()
    else:
        input_sample_set._values = None
    return input_sample_set
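
A quick sketch, assuming the function above is exposed as ``bet.sampling.basicSampling.random_sample_set``:

import numpy as np
import bet.sampling.basicSampling as bsam

# 100 uniform random samples on [-1, 1]^3; 'lhs' would give a latin hypercube
domain = np.repeat([[-1.0, 1.0]], 3, axis=0)
rand_set = bsam.random_sample_set('random', domain, num_samples=100)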
Example #52
def user_partition_user_distribution(data_set, data_partition_set,
                                     data_distribution_set):
    r"""
    Creates a user defined simple function approximation of a user
    defined distribution. The simple function discretization is
    specified in the ``data_partition_set``, and the set of i.i.d.
    samples from the distribution is specified in the
    ``data_distribution_set``.

    :param data_set: Sample set that the probability measure is defined for.
    :type data_set: :class:`~bet.sample.discretization` or
        :class:`~bet.sample.sample_set` or :class:`~numpy.ndarray`
    :param data_partition_set: Sample set defining the discretization
        of the data space into Voronoi cells upon which a simple function
        is defined.
    :type data_partition_set: :class:`~bet.sample.discretization` or
        :class:`~bet.sample.sample_set` or :class:`~numpy.ndarray`
    :param data_distribution_set: Sample set containing the i.i.d. samples
        from the distribution on the data space that are binned within the
        Voronoi cells implicitly defined by the ``data_partition_set``.
    :type data_distribution_set: :class:`~bet.sample.discretization` or
        :class:`~bet.sample.sample_set` or :class:`~numpy.ndarray`

    :rtype: :class:`~bet.sample.voronoi_sample_set`
    :returns: sample_set object defining the simple function approximation
    """

    if isinstance(data_set, samp.sample_set_base):
        s_set = data_set.copy()
        dim = s_set._dim
    elif isinstance(data_set, samp.discretization):
        s_set = data_set._output_sample_set.copy()
        dim = s_set._dim
    elif isinstance(data_set, np.ndarray):
        dim = data_set.shape[1]
        values = data_set
        s_set = samp.sample_set(dim=dim)
        s_set.set_values(values)
    else:
        msg = "The first argument must be of type bet.sample.sample_set, "
        msg += "bet.sample.discretization or np.ndarray"
        raise wrong_argument_type(msg)

    if isinstance(data_partition_set, samp.sample_set_base):
        M = data_partition_set.check_num()
        d_distr_samples = data_partition_set._values
        dim_simpleFun = d_distr_samples.shape[1]
    elif isinstance(data_partition_set, samp.discretization):
        M = data_partition_set.check_nums()
        d_distr_samples = data_partition_set._output_sample_set._values
        dim_simpleFun = d_distr_samples.shape[1]
    elif isinstance(data_partition_set, np.ndarray):
        M = data_partition_set.shape[0]
        dim_simpleFun = data_partition_set.shape[1]
        d_distr_samples = data_partition_set
    else:
        msg = "The second argument must be of type bet.sample.sample_set, "
        msg += "bet.sample.discretization or np.ndarray"
        raise wrong_argument_type(msg)

    if isinstance(data_distribution_set, samp.sample_set_base):
        d_distr_emulate = data_distribution_set._values
        dim_MonteCarlo = d_distr_emulate.shape[1]
        num_d_emulate = data_distribution_set.check_num()
    elif isinstance(data_distribution_set, samp.discretization):
        d_distr_emulate = data_distribution_set._output_sample_set._values
        dim_MonteCarlo = d_distr_emulate.shape[1]
        num_d_emulate = data_distribution_set.check_nums()
    elif isinstance(data_distribution_set, np.ndarray):
        num_d_emulate = data_distribution_set.shape[0]
        dim_MonteCarlo = data_distribution_set.shape[1]
        d_distr_emulate = data_distribution_set
    else:
        msg = "The second argument must be of type bet.sample.sample_set, "
        msg += "bet.sample.discretization or np.ndarray"
        raise wrong_argument_type(msg)

    if np.not_equal(dim_MonteCarlo, dim) or np.not_equal(dim_simpleFun, dim):
        msg = "The argument types have conflicting dimensions"
        raise wrong_argument_type(msg)

    # Initialize sample set object
    s_set = samp.sample_set(dim)
    s_set.set_values(d_distr_samples)
    s_set.set_kdtree()

    (_, k) = s_set.query(d_distr_emulate)

    count_neighbors = np.zeros((M,), dtype=np.int)
    for i in xrange(M):
        count_neighbors[i] = np.sum(np.equal(k, i))

    # Use the binning to define :math:`\rho_{\mathcal{D},M}`
    ccount_neighbors = np.copy(count_neighbors)
    comm.Allreduce([count_neighbors, MPI.INT], [ccount_neighbors, MPI.INT],
                   op=MPI.SUM)
    count_neighbors = ccount_neighbors
    rho_D_M = count_neighbors.astype(np.float64) / \
              float(num_d_emulate * comm.size)
    s_set.set_probabilities(rho_D_M)

    if isinstance(data_set, samp.discretization):
        data_set._output_probability_set = s_set
    return s_set
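
A hypothetical end-to-end call, assuming this function lives in ``bet.calculateP.simpleFunP``; all three arguments may be plain arrays, as the type checks above allow:

import numpy as np
import bet.calculateP.simpleFunP as simpleFunP

# 1-D data space: partition it with 4 Voronoi points and weight the cells
# with 1000 draws from a (hypothetical) normal distribution
data = np.random.random((50, 1))
partition = np.linspace(0.0, 1.0, 4).reshape(-1, 1)
distribution = np.random.normal(0.5, 0.1, (1000, 1))

simple_fun_set = simpleFunP.user_partition_user_distribution(
    data, partition, distribution)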
Example #53
import bet.postProcess.plotDomains as plotD
import bet.sample as samp
import bet.sampling.basicSampling as bsam
from myModel import my_model
from Compute_Save_KL import computeSaveKL

# Interface BET to the model.
sampler = bsam.sampler(my_model)

# Define the number of KL terms to use to represent permeability field
num_KL_terms = 2
# Compute and save the KL expansion -- can comment out after running once
computeSaveKL(num_KL_terms)

# Initialize input parameter sample set object
input_samples = samp.sample_set(num_KL_terms)

# Set parameter domain
KL_term_min = -3.0
KL_term_max = 3.0
input_samples.set_domain(
    np.repeat([[KL_term_min, KL_term_max]], num_KL_terms, axis=0))
'''
Suggested changes for user:

Try with and without random sampling.

If using regular sampling, try different numbers of samples
per dimension (be careful if the dimension is not 2).
'''
# Generate samples on the parameter space
Example #54
figurepath = "figures_linearode/decay-"+str(decaynum)
if not os.path.isdir(figurepath):
    os.makedirs(figurepath)

# Whether to create new output data
createNewOutputData = sys.argv[2].lower() == 'true'

# Generate true parameters
an, bn = mm.true_param()
coef_true = np.zeros((1, 10))
coef_true[0, 0::2] = an
coef_true[0, 1::2] = bn

# Initialize 2*trunc_term input parameter sample set object
# trunc_term is defined in myModel
input_samples = samp.sample_set(2*mm.param_len)
# Set parameter domain
parameter_domain = mm.my_model_domain(pow=-decaynum, halfwidth0=0.5)
input_samples.set_domain(parameter_domain)

# Define the sampler that will be used to create the discretization
# object, which is the fundamental object used by BET to compute
# solutions to the stochastic inverse problem
sampler = bsam.sampler(mm.my_model)


# Generate samples on the parameter space
randomSampling = True
if randomSampling is True:
    input_samples = sampler.random_sample_set('random', input_samples,
                                              num_samples=200000)
else:
Example #55
def scatter_rhoD(sample_obj,
                 ref_sample=None,
                 sample_nos=None,
                 io_flag='input',
                 rho_D=None,
                 dim_nums=None,
                 label_char=None,
                 showdim=None,
                 save=True,
                 interactive=False,
                 file_extension=".png",
                 markersize=75):
    r"""
    Create scatter plots of samples within the sample object colored by the
    pointwise probability density values (``rho_D`` evaluated at the
    corresponding outputs).  A reference sample (``ref_sample``) can be
    chosen by the user.  This reference sample will be plotted as a mauve
    circle twice the size of the other markers.

    .. note::

        Do not specify the file extension in BOTH ``filename`` and
        ``file_extension``.

    :param sample_obj: Object containing the samples to plot
    :type sample_obj: :class:`~bet.sample.discretization`
        or :class:`~bet.sample.sample_set_base`
    :param ref_sample: reference parameter value
    :type ref_sample: :class:`numpy.ndarray` of shape (ndim,)
    :param list sample_nos: sample numbers to plot
    :param string io_flag: Either `input` or `output`. If ``sample_obj`` is a
        :class:`~bet.sample.discretization` object, flag whether to plot the
        input or the output samples.
    :param rho_D: probability density function on D
    :type rho_D: callable function that takes a :class:`np.array` and returns a
        :class:`numpy.ndarray`
    :param list dim_nums: integers representing domain coordinate
        numbers to plot (e.g. i, where :math:`x_i` is a coordinate in the
        input/output space).
    :param string label_char: character to use to label coordinate axes
    :param int showdim: 2 or 3, flag to determine whether or not to show
        pairwise or tripletwise parameter sample scatter plots in 2 or 3
        dimensions
    :param bool save: flag whether or not to save the figure
    :param bool interactive: flag whether or not to show the figure
    :param string file_extension: file extension

    """
    # If there is density function given determine the pointwise probability
    # values of each sample based on the value in the data space. Otherwise,
    # color the samples in numerical order.
    rD = None
    if isinstance(sample_obj, sample.discretization):
        if rho_D is not None:
            rD = rho_D(sample_obj._output_sample_set.get_values())
        if io_flag == 'input':
            sample_obj = sample_obj._input_sample_set
        else:
            sample_obj = sample_obj._output_sample_set
    elif isinstance(sample_obj, sample.sample_set_base):
        if io_flag == 'output' and rho_D is not None:
            rD = rho_D(sample_obj.get_values())
    else:
        raise bad_object("Improper sample object")

    if ref_sample is None:
        ref_sample = sample_obj._reference_value

    if rD is None:
        rD = np.ones(sample_obj.get_values().shape[0])

    if label_char is None:
        if io_flag == 'input':
            label_char = r'$\lambda_'
            prefix = 'input_'
        elif io_flag == 'output':
            label_char = r'$q_'
            prefix = 'output_'
        else:  # unrecognized io_flag
            label_char = r'$x_'
            prefix = 'rhoD_'
    else:
        # Keep the user-supplied label_char; only set the filename prefix.
        prefix = 'rhoD_'

    # If no specific coordinate numbers are given for the parameter coordinates
    # (e.g. i, where \lambda_i is a coordinate in the parameter space), then
    # set them to be the counting numbers.
    if dim_nums is None:
        dim_nums = np.arange(1, sample_obj.get_values().shape[1] + 1)
    # Create the labels based on the user selected parameter coordinates
    xlabel = label_char + r'{' + str(dim_nums[0]) + '}$'
    ylabel = label_char + r'{' + str(dim_nums[1]) + '}$'
    savename = prefix + 'samples_cs'
    # Plot 2 or 3 dimensional scatter plots of the samples colored by rD.
    if sample_obj.get_dim() == 2:
        scatter_2D(sample_obj,
                   sample_nos,
                   rD,
                   ref_sample,
                   save,
                   interactive,
                   xlabel,
                   ylabel,
                   None,
                   savename,
                   file_extension=file_extension,
                   markersize=markersize)
    elif sample_obj.get_dim() > 2 and showdim == 2:
        temp_obj = sample.sample_set(2)
        for x, y in combinations(dim_nums, 2):
            xlabel = label_char + r'{' + str(x) + '}$'
            ylabel = label_char + r'{' + str(y) + '}$'
            savename = prefix + 'samples_x' + str(x) + 'x' + str(y) + '_cs'
            temp_obj.set_values(sample_obj.get_values()[:, [x - 1, y - 1]])
            if ref_sample is not None:
                scatter_2D(temp_obj,
                           sample_nos,
                           rD,
                           ref_sample[[x - 1, y - 1]],
                           save,
                           interactive,
                           xlabel,
                           ylabel,
                           None,
                           savename,
                           file_extension=file_extension,
                           markersize=markersize)
            else:
                scatter_2D(temp_obj,
                           sample_nos,
                           rD,
                           ref_sample,
                           save,
                           interactive,
                           xlabel,
                           ylabel,
                           None,
                           savename,
                           file_extension=file_extension,
                           markersize=markersize)
    elif sample_obj.get_dim() == 3:
        zlabel = label_char + r'{' + str(dim_nums[2]) + '}$'
        scatter_3D(sample_obj,
                   sample_nos,
                   rD,
                   ref_sample,
                   save,
                   interactive,
                   xlabel,
                   ylabel,
                   zlabel,
                   None,
                   savename,
                   file_extension,
                   markersize=markersize)
    elif sample_obj.get_dim() > 3 and showdim == 3:
        temp_obj = sample.sample_set(3)
        for x, y, z in combinations(dim_nums, 3):
            xlabel = label_char + r'{' + str(x) + '}$'
            ylabel = label_char + r'{' + str(y) + '}$'
            zlabel = label_char + r'{' + str(z) + '}$'
            savename = prefix + 'samples_x' + str(x) + 'x' + str(y) + 'x' +\
                str(z) + '_cs'
            temp_obj.set_values(sample_obj.get_values()[:,
                                                        [x - 1, y - 1, z - 1]])
            if ref_sample is not None:
                scatter_3D(temp_obj,
                           sample_nos,
                           rD,
                           ref_sample[[x - 1, y - 1, z - 1]],
                           save,
                           interactive,
                           xlabel,
                           ylabel,
                           zlabel,
                           None,
                           savename,
                           file_extension,
                           markersize=markersize)
            else:
                scatter_3D(temp_obj,
                           sample_nos,
                           rD,
                           ref_sample,
                           save,
                           interactive,
                           xlabel,
                           ylabel,
                           zlabel,
                           None,
                           savename,
                           file_extension,
                           markersize=markersize)
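# A minimal, hypothetical usage sketch for scatter_rhoD (not part of the
# original example): color 2-D input samples by a user-defined density on
# the output space. The toy model and density below are assumptions made
# purely for illustration.
import numpy as np
import bet.sample as sample

input_set = sample.sample_set(2)
input_set.set_values(np.random.random((1000, 2)))
output_set = sample.sample_set(2)
output_set.set_values(2.0 * input_set.get_values())  # toy linear model
disc = sample.discretization(input_set, output_set)

def rho_D(outputs):
    # Toy density: more weight near the output point (1, 1).
    return np.exp(-np.sum((outputs - 1.0) ** 2, axis=1))

scatter_rhoD(disc, rho_D=rho_D, io_flag='input', save=True, interactive=False)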
Example #56

import numpy as np
import bet.postProcess.postTools as postTools
import bet.Comm as comm
import bet.sample as sample

# Set up the info for the spaces
input_dim = 5
QoI_scalar_map_num = 10
num_samples = 1E5
num_centers = 10

# Let the map Q be a random matrix of size (QoI_scalar_map_num, input_dim)
np.random.seed(0)
Q = np.random.random([QoI_scalar_map_num, input_dim])

# Initialize some sample objects we will need
input_samples = sample.sample_set(input_dim)
output_samples = sample.sample_set(QoI_scalar_map_num)

# Choose random samples in parameter space to solve the model
input_samples.set_values(
    np.random.uniform(0, 1, [int(num_samples), input_dim]))

# Compute the output values with the map Q
output_samples.set_values(
    Q.dot(input_samples.get_values().transpose()).transpose())

# Calculate the gradient vectors at some subset of the samples.  Here the
# *normalize* argument is set to *True* because we are using *bin_ratio* to
# determine the uncertainty in our data.
cluster_discretization = sample.discretization(input_samples, output_samples)
# We will approximate the Jacobian at each of the centers.
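# The original example is truncated here. A plausible completion, following
# the pattern in BET's gradient tests, approximates the Jacobian at each
# center with radial basis functions; the module path and the *normalize*
# keyword are assumptions consistent with the comments above.
import bet.sensitivity.gradients as grad

center_discretization = grad.calculate_gradients_rbf(
    cluster_discretization, num_centers, normalize=True)
input_samples_centers = center_discretization.get_input_sample_set()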
Example #57

import numpy as np
import bet.postProcess.plotP as plotP
import bet.postProcess.plotDomains as plotD
import bet.sample as samp
import bet.sampling.basicSampling as bsam

from myModel import my_model

# Define the sampler that will be used to create the discretization
# object, which is the fundamental object used by BET to compute
# solutions to the stochastic inverse problem.
# The sampler and my_model together form the interface between BET and
# the model; they allow BET to create input/output samples of the model.
sampler = bsam.sampler(my_model)

# Initialize 2-dimensional input parameter sample set object
input_samples = samp.sample_set(2)

# Set parameter domain
input_samples.set_domain(np.array([[3.0, 6.0], [1.0, 5.0]]))
'''
Suggested changes for user:

Try with and without random sampling.

If using random sampling, try num_samples = 1E3 and 1E4.
What happens when num_samples = 1E2?
Try using 'lhs' instead of 'random' in the random_sample_set.

If using regular sampling, try different numbers of samples
per dimension.
'''
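# Illustrative sketch of the suggested variations above; the sample counts
# are examples, not prescriptions.
randomSampling = True
if randomSampling is True:
    # 'random' draws i.i.d. uniform samples; pass 'lhs' instead for Latin
    # hypercube sampling.
    input_samples = sampler.random_sample_set(
        'random', input_samples, num_samples=int(1E3))
else:
    # Regular (tensor-product) grid: choose the number of samples per
    # dimension.
    input_samples = sampler.regular_sample_set(
        input_samples, num_samples_per_dim=np.repeat(30, input_samples.get_dim()))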
Example #58
def show_data_domain_multi(sample_disc,
                           Q_ref=None,
                           Q_nums=None,
                           img_folder='figs/',
                           ref_markers=None,
                           ref_colors=None,
                           showdim=None,
                           file_extension=".png",
                           markersize=75):
    r"""
    Plots 2-D projections of the data domain D using a triangulation based on
    the first two coordinates (parameters) of the generating samples, where
    :math:`Q = (q_1, q_i)` for ``i`` in ``Q_nums``, with a marker for each
    :math:`Q_{ref}`.

    :param sample_disc: Object containing the samples to plot
    :type sample_disc: :class:`~bet.sample.discretization`
    :param Q_ref: reference data value
    :type Q_ref: :class:`numpy.ndarray` of shape (M, mdim)
    :param list Q_nums: dimensions of the QoI to plot
    :param string img_folder: folder to save the plots to
    :param list ref_markers: list of marker types for :math:`Q_{ref}`
    :param list ref_colors: list of colors for :math:`Q_{ref}`
    :param showdim: default 0 (i.e., :math:`q_1`). If int then flag to show
        all combinations with a given dimension (:math:`q_i`) or if ``all``
        show all combinations.
    :type showdim: int or string
    :param string file_extension: file extension

    """
    if not isinstance(sample_disc, sample.discretization):
        raise bad_object("Improper sample object")

    # Set the default marker and colors
    if ref_markers is None:
        ref_markers = markers
    if ref_colors is None:
        ref_colors = colors

    data_obj = sample_disc._output_sample_set
    sample_obj = sample_disc._input_sample_set

    if Q_ref is None:
        Q_ref = data_obj._reference_value

    # If no specific coordinate numbers are given for the data coordinates
    # (e.g. i, where \q_i is a coordinate in the data space), then
    # set them to be the the counting numbers.
    if Q_nums is None:
        Q_nums = list(range(data_obj.get_dim()))

    # If no specific coordinate is chosen, default to the first
    # coordinate direction.
    if showdim is None:
        showdim = 0

    # Create a folder for these figures if it doesn't already exist
    if not os.path.isdir(img_folder):
        os.mkdir(img_folder)

    # Make sure the shape of Q_ref is correct
    if Q_ref is not None:
        Q_ref = util.fix_dimensions_data(Q_ref, data_obj.get_dim())

    # Create the triangulation used to define the topology of the samples
    # in the data space from the first two parameters in the parameter space.
    triangulation = tri.Triangulation(sample_obj.get_values()[:, 0],
                                      sample_obj.get_values()[:, 1])
    triangles = triangulation.triangles

    # Create plots of the showdim^th QoI (q_{showdim}) with all other QoI (q_i)
    if isinstance(showdim, int):
        for i in Q_nums:
            if i != showdim:
                xlabel = r'$q_{' + str(showdim + 1) + r'}$'
                ylabel = r'$q_{' + str(i + 1) + r'}$'

                filenames = [
                    img_folder + 'domain_q' + str(showdim + 1) + '_q' +
                    str(i + 1), img_folder + 'q' + str(showdim + 1) + '_q' +
                    str(i + 1) + '_domain_Q_cs'
                ]

                data_obj_temp = sample.sample_set(2)
                data_obj_temp.set_values(data_obj.get_values()[:,
                                                               [showdim, i]])
                sample_disc_temp = sample.discretization(
                    sample_obj, data_obj_temp)

                if Q_ref is not None:
                    show_data_domain_2D(sample_disc_temp,
                                        Q_ref[:, [showdim, i]],
                                        ref_markers,
                                        ref_colors,
                                        xlabel=xlabel,
                                        ylabel=ylabel,
                                        triangles=triangles,
                                        save=True,
                                        interactive=False,
                                        filenames=filenames,
                                        file_extension=file_extension,
                                        markersize=markersize)
                else:
                    show_data_domain_2D(sample_disc_temp,
                                        None,
                                        ref_markers,
                                        ref_colors,
                                        xlabel=xlabel,
                                        ylabel=ylabel,
                                        triangles=triangles,
                                        save=True,
                                        interactive=False,
                                        filenames=filenames,
                                        file_extension=file_extension,
                                        markersize=markersize)
    # Create plots of all combinations of QoI in 2D
    elif showdim == 'all' or showdim == 'ALL':
        for x, y in combinations(Q_nums, 2):
            xlabel = r'$q_{' + str(x + 1) + r'}$'
            ylabel = r'$q_{' + str(y + 1) + r'}$'

            filenames = [
                img_folder + 'domain_q' + str(x + 1) + '_q' + str(y + 1),
                img_folder + 'q' + str(x + 1) + '_q' + str(y + 1) +
                '_domain_Q_cs'
            ]

            data_obj_temp = sample.sample_set(2)
            data_obj_temp.set_values(data_obj.get_values()[:, [x, y]])
            sample_disc_temp = sample.discretization(sample_obj, data_obj_temp)

            if Q_ref is not None:
                show_data_domain_2D(sample_disc_temp,
                                    Q_ref[:, [x, y]],
                                    ref_markers,
                                    ref_colors,
                                    xlabel=xlabel,
                                    ylabel=ylabel,
                                    triangles=triangles,
                                    save=True,
                                    interactive=False,
                                    filenames=filenames,
                                    file_extension=file_extension,
                                    markersize=markersize)
            else:
                show_data_domain_2D(sample_disc_temp,
                                    None,
                                    ref_markers,
                                    ref_colors,
                                    xlabel=xlabel,
                                    ylabel=ylabel,
                                    triangles=triangles,
                                    save=True,
                                    interactive=False,
                                    filenames=filenames,
                                    file_extension=file_extension,
                                    markersize=markersize)
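# A brief, hypothetical usage sketch for show_data_domain_multi (names and
# shapes are illustrative, not from the original example).
import numpy as np
import bet.sample as sample

inputs = sample.sample_set(2)
inputs.set_values(np.random.random((500, 2)))
outputs = sample.sample_set(3)
outputs.set_values(inputs.get_values().dot(np.random.random((2, 3))))
my_disc = sample.discretization(inputs, outputs)

# Plot q_1 against every other QoI, marking a reference output value.
show_data_domain_multi(my_disc, Q_ref=np.array([[0.5, 0.5, 0.5]]),
                       showdim=0, img_folder='figs/')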
Example #59
import numpy as np

import bet.calculateP.calculateP as calculateP
import bet.postProcess.plotP as plotP
import bet.postProcess.plotDomains as plotD
import bet.sample as samp
import bet.sampling.basicSampling as bsam
from myModel import my_model

# Define the sampler that will be used to create the discretization
# object, which is the fundamental object used by BET to compute
# solutions to the stochastic inverse problem.
# The sampler and my_model together form the interface between BET and
# the model; they allow BET to create input/output samples of the model.
sampler = bsam.sampler(my_model)

# Initialize 2-dimensional input parameter sample set object
input_samples = samp.sample_set(2)

# Set parameter domain
input_samples.set_domain(np.repeat([[0.0, 1.0]], 2, axis=0))

'''
Suggested changes for user:

Try with and without random sampling.

If using random sampling, try num_samples = 1E3 and 1E4.
What happens when num_samples = 1E2?
Try using 'lhs' instead of 'random' in the random_sample_set.

If using regular sampling, try different numbers of samples
per dimension.
'''
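# The example is cut off here. In the sibling BET examples the next step
# draws the input samples and runs the model to build the discretization;
# the method name below is assumed from bet.sampling.basicSampling.
input_samples = sampler.random_sample_set(
    'random', input_samples, num_samples=int(1E3))
my_discretization = sampler.compute_QoI_and_create_discretization(
    input_samples)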
Example #60
def scatter_2D_multi(sample_obj,
                     color=None,
                     ref_sample=None,
                     img_folder='figs/',
                     filename="scatter2Dm",
                     label_char=r'$\lambda',
                     showdim=None,
                     file_extension=".png",
                     cbar_label=None,
                     markersize=75):
    r"""
    Creates two-dimensional projections of scatter plots of samples colored
    by ``color`` (usually an array of pointwise probability density values). A
    reference sample (``ref_sample``) can be chosen by the user. This reference
    sample will be plotted as a mauve circle twice the size of the other
    markers.

    .. note::

        Do not specify the file extension in BOTH ``filename`` and
        ``file_extension``.

    :param sample_obj: Object containing the samples to plot
    :type sample_obj: :class:`~bet.sample.sample_set_base`
    :param color: values to color the ``samples`` by
    :type color: :class:`numpy.ndarray`
    :param ref_sample: reference parameter value
    :type ref_sample: :class:`numpy.ndarray` of shape (ndim,)
    :param string img_folder: folder to save the plots to
    :param string filename: filename to save the figure as
    :param string label_char: character to use to label coordinate axes
    :param showdim: default 0. If int then flag to show all combinations with a
        given dimension (:math:`\lambda_i`) or if ``all`` show all combinations.
    :type showdim: int or string
    :param string file_extension: file extension
    :param string cbar_label: color bar label
    :param int markersize: size of the scatter markers

    """
    if not isinstance(sample_obj, sample.sample_set_base):
        raise bad_object("Improper sample object")
    # If no specific coordinate is chosen, default to the first
    # coordinate direction.
    if showdim is None:
        showdim = 0
    # Create a folder for these figures if it doesn't already exist
    if not os.path.isdir(img_folder):
        os.mkdir(img_folder)
    # Create list of all the parameter coordinates
    p_nums = np.arange(sample_obj.get_dim())

    # Create plots of the showdim^th parameter (\lambda_{showdim}) with all the
    # other parameters
    if isinstance(showdim, int):
        for i in p_nums:
            if i == showdim:
                continue  # skip the degenerate plot of a dimension against itself
            xlabel = label_char + r'_{' + str(showdim + 1) + r'}$'
            ylabel = label_char + r'_{' + str(i + 1) + r'}$'

            postfix = '_d' + str(showdim + 1) + '_d' + str(i + 1)
            myfilename = os.path.join(img_folder, filename + postfix)

            sample_obj_temp = sample.sample_set(2)
            sample_obj_temp.set_values(sample_obj.get_values()[:,
                                                               [showdim, i]])

            if ref_sample is not None:
                scatter_2D(sample_obj_temp,
                           sample_nos=None,
                           color=color,
                           ref_sample=ref_sample[[showdim, i]],
                           save=True,
                           interactive=False,
                           xlabel=xlabel,
                           ylabel=ylabel,
                           cbar_label=cbar_label,
                           filename=myfilename,
                           file_extension=file_extension,
                           markersize=markersize)
            else:
                scatter_2D(sample_obj_temp,
                           sample_nos=None,
                           color=color,
                           ref_sample=None,
                           save=True,
                           interactive=False,
                           xlabel=xlabel,
                           ylabel=ylabel,
                           cbar_label=cbar_label,
                           filename=myfilename,
                           file_extension=file_extension,
                           markersize=markersize)

    # Create plots of all of the possible pairwise combinations of parameters
    elif showdim == 'all' or showdim == 'ALL':
        for x, y in combinations(p_nums, 2):
            xlabel = label_char + r'_{' + str(x + 1) + r'}$'
            ylabel = label_char + r'_{' + str(y + 1) + r'}$'

            postfix = '_d' + str(x + 1) + '_d' + str(y + 1)
            myfilename = os.path.join(img_folder, filename + postfix)

            sample_obj_temp = sample.sample_set(2)
            sample_obj_temp.set_values(sample_obj.get_values()[:, [x, y]])

            if ref_sample is not None:
                scatter_2D(sample_obj_temp,
                           sample_nos=None,
                           color=color,
                           ref_sample=ref_sample[[x, y]],
                           save=True,
                           interactive=False,
                           xlabel=xlabel,
                           ylabel=ylabel,
                           cbar_label=cbar_label,
                           filename=myfilename,
                           markersize=markersize,
                           file_extension=file_extension)
            else:
                scatter_2D(sample_obj_temp,
                           sample_nos=None,
                           color=color,
                           ref_sample=None,
                           save=True,
                           interactive=False,
                           xlabel=xlabel,
                           ylabel=ylabel,
                           cbar_label=cbar_label,
                           markersize=markersize,
                           filename=myfilename,
                           file_extension=file_extension)
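# Hypothetical usage sketch for scatter_2D_multi (names and values are
# illustrative, not from the original example).
import numpy as np
import bet.sample as sample

params = sample.sample_set(3)
params.set_values(np.random.random((500, 3)))
P = np.random.random(500)  # stand-in for pointwise probability densities

# Plot every pairwise projection of the parameters, colored by P.
scatter_2D_multi(params, color=P, showdim='all',
                 img_folder='figs/', cbar_label='density')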