Code Example #1
def Plot3_2_Quasi():
    numRows = 3

    fig = plt.figure(figsize=(20, 10))

    for i in range(0, len(n) * numRows):
        axes = fig.add_subplot(numRows, len(n), i + 1)
        axes.set_aspect(1.)
        axes.set_title("N = " + str(n[i % len(n)]))

        x = []
        axes.set_xlim([0, 2.0])
        axes.set_ylim([0, 2.0])
        if i < (len(n) * numRows) / 3:  # Sobol, first coordinate
            seq = sobol_seq.i4_sobol_generate(2, n[i % len(n)])
            for s in seq:
                x.append(s[0] * 2)
        elif i < 2 * (len(n) * numRows) / 3:  # Halton, last coordinate of a 5-dimensional sequence
            seq = ghalton.Halton(5).get(n[i % len(n)])
            for s in seq:
                x.append(s[4] * 2)
        else:  # Halton, last coordinate of a 20-dimensional sequence
            seq = ghalton.Halton(20).get(n[i % len(n)])
            for s in seq:
                x.append(s[19] * 2)
        axes.hist(x, bins=100, density=True)  # `normed` was removed in Matplotlib 3.x

    plt.show()
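Note: the snippet above relies on module-level names that are not shown here: the list of sample sizes n, matplotlib's plt, and the sobol_seq and ghalton packages. A minimal setup sketch under that assumption (the values in n are hypothetical):

import matplotlib.pyplot as plt
import ghalton
import sobol_seq

# hypothetical sample counts, one column of subplots per value
n = [100, 1000, 10000]

Plot3_2_Quasi()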
Code Example #2
 def __init__(self, dimension, layer, unit, activation):
     self.dimension = dimension
     self.layer = layer
     self.unit = unit
     self.batch = 64
     self.batch_d = 64
     self.beta = 1000
     self.sequencer = ghalton.Halton(dimension)
     self.net = ResNet(layer, unit, activation)
     self.x_b = tf.compat.v1.placeholder(tf.float64, (None, dimension))
     self.x_i = tf.compat.v1.placeholder(tf.float64, (None, dimension))
     output_b = self.net(self.x_b)
     output_i = self.net(self.x_i)
     self.loss_b = 10 * tf.reduce_mean((output_b - self.g(self.x_b)) ** 2)
     self.loss_i = 4 * tf.reduce_mean(self.norm2_grad(output_i, self.x_i) / 2 - self.f(self.x_i) * output_i)
     self.loss = self.beta/2 * self.loss_b + self.loss_i
     self.opt = tf.compat.v1.train.AdamOptimizer(learning_rate = 0.001).minimize(self.loss)
     self.errl2 = self.l2(output_i - self.u(self.x_i), self.x_i) / self.l2(self.u(self.x_i), self.x_i)
     self.errh1 = self.h1(output_i - self.u(self.x_i), self.x_i) / self.h1(self.u(self.x_i), self.x_i)
     self.errh2 = self.h2(output_i - self.u(self.x_i), self.x_i) / self.h2(self.u(self.x_i), self.x_i)
     tf.compat.v1.summary.scalar('loss_b', self.loss_b)
     tf.compat.v1.summary.scalar('loss_i', self.loss_i)
     tf.compat.v1.summary.scalar('loss', self.loss)
     tf.compat.v1.summary.scalar('errl2', self.errl2)
     tf.compat.v1.summary.scalar('errh1', self.errh1)
     tf.compat.v1.summary.scalar('errh2', self.errh2)
     self.init = tf.compat.v1.global_variables_initializer()
     self.merged = tf.compat.v1.summary.merge_all()
Code Example #3
    def __c_1_lambda_quasi(_lambda, dim, ecdf):

        from scipy.stats import norm

        seq = gh.Halton(int(dim))

        # keep only one cycle of maxima in memory (n_sample, n_sample_per_cycle,
        # x_point, and the numpy helpers zeros/array/sort/np come from the enclosing scope)
        proj_max = zeros(n_sample_per_cycle)

        for i in range(n_sample):

            # Halton sequences and quasi-Gaussians
            halton_samples = seq.get(int(_lambda))
            quasi_samples = array([[norm.ppf(halton_samples[k][m]) for m in range(dim)]\
                for k in range(_lambda)]).T

            # projection onto e1
            proj = quasi_samples[0, :]

            # the largest order statistic
            proj_sorted = sort(proj)
            proj_max[i % n_sample_per_cycle] = proj_sorted[-1]

            if (i + 1) % n_sample_per_cycle == 0:
                for k, x in enumerate(x_point):
                    ecdf[k] += np.sum(proj_max <= x)
Code Example #4
def build_quasirandom_transforms(num_transforms, color_sigma, zoom_range,
                                 rotation_range, shear_range,
                                 translation_range, do_flip=True,
                                 allow_stretch=False, skip=0):
    """Quasi Random transform for test images, determinastic random transform

    Args:
        num_transforms: a int, total numbers of transform
        color_sigma: a float, color noise
        zoom_range: a tuple (min_zoom, max_zoom)
        rotation_range: a tuple(min_angle, max_angle)
        shear_range: a tuple(min_shear, max_shear)
        translation_range: a tuple(min_shift, max_shift)
        do_flip: a bool, flip an image
        allow_stretch: a bool, allow stretching

    Returns:
        transform instance and color vecs

    """
    gen = ghalton.Halton(10)
    uniform_samples = np.array(gen.get(num_transforms + skip))[skip:]

    tfs = []
    for s in uniform_samples:
        rotation = uniform(s[0], *rotation_range)
        shift_x = uniform(s[1], *translation_range)
        shift_y = uniform(s[2], *translation_range)
        translation = (shift_x, shift_y)

        # setting shear last because we're not using it at the moment
        shear = uniform(s[9], *shear_range)

        if do_flip:
            flip = bernoulli(s[8], p=0.5)
        else:
            flip = False

        log_zoom_range = [np.log(z) for z in zoom_range]
        if isinstance(allow_stretch, float):
            log_stretch_range = [-np.log(allow_stretch), np.log(allow_stretch)]
            zoom = np.exp(uniform(s[6], *log_zoom_range))
            stretch = np.exp(uniform(s[7], *log_stretch_range))
            zoom_x = zoom * stretch
            zoom_y = zoom / stretch
        elif allow_stretch is True:  # avoid bugs, e.g. when it is an integer
            zoom_x = np.exp(uniform(s[6], *log_zoom_range))
            zoom_y = np.exp(uniform(s[7], *log_zoom_range))
        else:
            zoom_x = zoom_y = np.exp(uniform(s[6], *log_zoom_range))
        # the range should be multiplicatively symmetric, so [1/1.1, 1.1]
        # instead of [0.9, 1.1] makes more sense.

        tfs.append(data.build_augmentation_transform((zoom_x, zoom_y),
                                                     rotation, shear, translation, flip))

    color_vecs = [normal(s[3:6], avg=0.0, std=color_sigma)
                  for s in uniform_samples]

    return tfs, color_vecs
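The uniform, bernoulli, and normal helpers used above are inverse-CDF transforms applied to individual Halton coordinates (the next example reaches the same helpers through an icdf module). A minimal sketch of what such helpers could look like; these definitions are illustrative assumptions, not the project's actual implementation:

import numpy as np
from scipy.stats import norm

def uniform(p, low, high):
    # map a quasi-random coordinate p in [0, 1) onto [low, high)
    return low + p * (high - low)

def bernoulli(p_value, p=0.5):
    # True for roughly the fraction p of the quasi-random coordinates
    return p_value < p

def normal(p, avg=0.0, std=1.0):
    # probit (inverse normal CDF) transform of quasi-random coordinates
    return avg + std * norm.ppf(np.asarray(p))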
Code Example #5
 def __init__(self, dimension, layer, unit, activation):
     self.dimension = dimension
     self.layer = layer
     self.unit = unit
     self.batch = 64
     self.batch_d = 64
     self.beta = 1000
     self.sequencer = ghalton.Halton(dimension)
     self.net = ResNet(layer, unit, activation)
     self.x_d = tf.compat.v1.placeholder(tf.float32, (None, dimension))
     self.x_n = tf.compat.v1.placeholder(tf.float32, (None, dimension))
     self.x_i = tf.compat.v1.placeholder(tf.float32, (None, dimension))
     output_d = self.net(self.x_d)
     output_n = self.net(self.x_n)
     output_i = self.net(self.x_i)
     self.loss_b = self.dimension * (tf.reduce_mean(
         self.beta / 2 * output_d**2 -
         output_d * self.agradn(output_d, self.x_d) - self.g_d(self.x_d) *
         (self.beta * output_d - self.agradn(output_d, self.x_d))) -
                                     tf.reduce_mean(
                                         self.g_n(self.x_n) * output_n))
     self.loss_i = tf.reduce_mean(
         self.agradgrad(output_i, self.x_i) / 2 -
         self.f(self.x_i) * output_i)
     self.loss = self.loss_b + self.loss_i
     self.opt = tf.compat.v1.train.AdamOptimizer(
         learning_rate=0.001).minimize(self.loss)
     self.errl2 = self.l2(output_i - self.u(self.x_i), self.x_i) / self.l2(
         self.u(self.x_i), self.x_i)
     self.errh1 = self.h1(output_i - self.u(self.x_i), self.x_i) / self.h1(
         self.u(self.x_i), self.x_i)
     self.errh2 = self.h2(output_i - self.u(self.x_i), self.x_i) / self.h2(
         self.u(self.x_i), self.x_i)
     self.init = tf.compat.v1.global_variables_initializer()
Code Example #6
def build_quasirandom_transforms(num_transforms, zoom_range, rotation_range, shear_range, translation_range, do_flip=True, allow_stretch=False):
    gen = ghalton.Halton(7)  # 7 dimensions to sample along
    uniform_samples = np.array(gen.get(num_transforms))

    tfs = []
    for s in uniform_samples:
        shift_x = icdf.uniform(s[0], *translation_range)
        shift_y = icdf.uniform(s[1], *translation_range)
        translation = (shift_x, shift_y)

        rotation = icdf.uniform(s[2], *rotation_range)
        shear = icdf.uniform(s[3], *shear_range)

        if do_flip:
            flip = icdf.bernoulli(s[4], p=0.5)
        else:
            flip = False

        log_zoom_range = [np.log(z) for z in zoom_range]
        if isinstance(allow_stretch, float):
            log_stretch_range = [-np.log(allow_stretch), np.log(allow_stretch)]
            zoom = np.exp(icdf.uniform(s[5], *log_zoom_range))
            stretch = np.exp(icdf.uniform(s[6], *log_stretch_range))
            zoom_x = zoom * stretch
            zoom_y = zoom / stretch
        elif allow_stretch is True:  # avoid bugs, e.g. when it is an integer
            zoom_x = np.exp(icdf.uniform(s[5], *log_zoom_range))
            zoom_y = np.exp(icdf.uniform(s[6], *log_zoom_range))
        else:
            zoom_x = zoom_y = np.exp(icdf.uniform(s[5], *log_zoom_range))
        # the range should be multiplicatively symmetric, so [1/1.1, 1.1] instead of [0.9, 1.1] makes more sense.

        tfs.append(data.build_augmentation_transform((zoom_x, zoom_y), rotation, shear, translation, flip))

    return tfs
Code Example #7
 def __init__(self, dimension, layer, unit, activation):
     self.dimension = dimension
     self.layer = layer
     self.unit = unit
     self.batch = 64
     self.batch_d = 64
     self.beta = 500
     self.sequencer = ghalton.Halton(dimension)
     self.net = ResNet(layer, unit, activation)
     self.x_b = tf.compat.v1.placeholder(tf.float64, (None, dimension))
     self.x_i = tf.compat.v1.placeholder(tf.float64, (None, dimension))
     output_b = self.net(self.x_b)
     output_i = self.net(self.x_i)
     self.loss_b = 10 * tf.reduce_mean(self.beta / 2 *
                                       (output_b - self.g(self.x_b))**2 +
                                       (self.g(self.x_b) - output_b) *
                                       self.diff_n(output_b, self.x_b))
     #        self.loss_b = 2 * dimension * tf.reduce_mean(self.beta / 2 * output_b ** 2 - output_b * self.diff_n(output_b, self.x_b) - self.g(self.x_b) * (self.beta * output_b - self.diff_n(output_b, self.x_b)))
     self.loss_i = 4 * tf.reduce_mean(
         self.norm2_grad(output_i, self.x_i) / 2 -
         self.f(self.x_i) * output_i)
     self.loss = self.loss_b + self.loss_i
     self.opt = tf.compat.v1.train.AdamOptimizer(
         learning_rate=0.001).minimize(self.loss)
     self.errl2 = self.l2(output_i - self.u(self.x_i), self.x_i) / self.l2(
         self.u(self.x_i), self.x_i)
     self.errh1 = self.h1(output_i - self.u(self.x_i), self.x_i) / self.h1(
         self.u(self.x_i), self.x_i)
     self.errh2 = self.h2(output_i - self.u(self.x_i), self.x_i) / self.h2(
         self.u(self.x_i), self.x_i)
     self.init = tf.compat.v1.global_variables_initializer()
Code Example #8
def ___c_1_lambda_quasi(_lambda, dim):
    """
    Numerical computation for 
    progress coefficient c(1, lambda)
    under quasi-random numbers
        
    """
    import ghalton as gh
    
    def __c_1_lambda_quasi(_lambda, dim, proj_max, halton_samples):

        from scipy.stats import norm

        # number of trials handled by this worker
        n_sample = len(proj_max)

        for i in range(n_sample):

            # take a fresh block of _lambda Halton points and map them to quasi-Gaussians
            block = halton_samples[i * _lambda:(i + 1) * _lambda]
            quasi_samples = array([[norm.ppf(block[k][m]) for m in range(dim)]
                                   for k in range(_lambda)]).T

            # projection onto e1
            proj = quasi_samples[0, :]

            # the largest order statistic
            proj_sorted = sort(proj)
            proj_max[i] = proj_sorted[-1]
        
    n_trial = int(1e4)
    n_worker = 4
    n_sample = n_trial // n_worker

    seq = gh.Halton(int(dim))
    halton_samples = seq.get(int(_lambda) * n_trial)

    # one block of _lambda * n_sample Halton points per worker
    # (Process/Array come from multiprocessing, zeros/array/empty/sort/inf from numpy,
    #  gaussian_kde from scipy.stats and quad from scipy.integrate, imported at module level)
    chunk = int(_lambda) * n_sample
    proj_max = [Array('f', zeros(n_sample)) for i in range(n_worker)]
    procs = [Process(target=__c_1_lambda_quasi,
                     args=[_lambda, dim, proj_max[i],
                           halton_samples[i * chunk:(i + 1) * chunk]])
             for i in range(n_worker)]
        
    # Starting the parallel computation
    for p in procs: p.start()
    for p in procs: p.join()
    
    all_sample = empty(n_trial)
    i = 0
    for a in proj_max:
        all_sample[i:i+len(a)] = array(a)
        i += len(a)
    
    # kernel density estimate of the largest order statistic, then its expectation
    epdf_om = gaussian_kde(all_sample)

    integrand = lambda x: x * epdf_om(x)
    E, err = quad(integrand, -inf, inf)
    
    return E
Code Example #9
def gen_tile_idx():
   
    k = ghalton.Halton(1)
    points = k.get(10)
    points = list(itertools.chain.from_iterable(points))
    points = np.array(points)
 
    return points
Code Example #10
    def __init__(self, num_experiment):

        state = np.random.get_state()

        np.random.seed(num_experiment)

        options = parse_config_file('.', 'config.json')
        input_space = InputSpace(options["variables"])
        tasks = parse_tasks_from_jobs(None, options["experiment-name"],
                                      options, input_space)

        for key in tasks:
            tasks[key].options['likelihood'] = "NOISELESS"

        sequence_size = 1000
        sequencer = ghalton.Halton(input_space.num_dims)
        X = np.array(sequencer.get(sequence_size))

        self.models = dict()
        self.tasks = tasks
        self.input_space = input_space

        for key in tasks:
            self.models[key] = GP(input_space.num_dims, **tasks[key].options)
            self.models[key].params['ls'].set_value(
                np.ones(input_space.num_dims) * 0.25 * input_space.num_dims)

            params = dict()
            params['hypers'] = dict()

            for hp in self.models[key].params:
                params['hypers'][hp] = self.models[key].params[hp].value

            params['chain length'] = 0.0

            # We sample given the specified hyper-params

            samples = self.models[key].sample_from_prior_given_hypers(X)
            self.models[key].fit(X, samples, hypers=params, fit_hypers=False)

#	def compute_function(gp):
#		def f(x):
#			return gp.predict(x)[ 0 ]
#		return f

#	self.funs = dict()

#	for key in self.models:
#		self.funs[ key ] = compute_function(self.models[ key ])

        self.funs = {
            key: sample_gp_with_random_features(self.models[key],
                                                NUM_RANDOM_FEATURES)
            for key in self.models
        }

        np.random.set_state(state)
Code Example #11
def deterministic_sample_fn(body,
                            collisions=False,
                            seed=False):  # TODO - collisions, seed
    import ghalton
    sequencer = ghalton.Halton(body.GetActiveDOF())
    #sequencer = ghalton.GeneralizedHalton(body.GetActiveDOF(), 68)
    lower_limits, upper_limits = body.GetActiveDOFLimits()
    return lambda: sequencer.get(1)[0] * (upper_limits - lower_limits
                                          ) + lower_limits
Code Example #12
    def getPoints_Halton(self, volumeshape, options, number=1000):
        points = []
        sequencer = ghalton.Halton(len(volumeshape))
        sequence = sequencer.get(number)
        for s in sequence:
            points.append(np.multiply(s, volumeshape))

        points = np.array(points).astype(int)

        return points
Code Example #13
def lrIterator(num_configs=10, decay_range=(0, 1e-3)):
    sequencer = ghalton.Halton(1)
    all_points = np.sort(np.squeeze(np.asarray(sequencer.get(num_configs))))
    print(all_points)

    decay_start = decay_range[0]
    decay_stop = decay_range[1]
    for point in all_points:
        decay = decay_start + (point * (decay_stop - decay_start))
        config = LRConfig(decay=decay)
        yield config
Code Example #14
 def init_sequencer(self):
     # ---------------------------------------#
     #                HALTON                  #
     # ---------------------------------------#
     if self.sequence_type == QMC_SEQUENCE.HALTON:
         if self.scramble_type == QMC_SCRAMBLING.GENERALISED:
             if self.qmc_kwargs[QMC_KWARG.PERM] is None:
                 perm = gh.EA_PERMS[:self.d]  # Default permutation
             else:
                 perm = self.qmc_kwargs[QMC_KWARG.PERM]
             self.sequencer = gh.GeneralizedHalton(perm)
         else:
             self.sequencer = gh.Halton(int(self.d))
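For reference, a minimal standalone sketch of the two sequencer choices made above, using ghalton's built-in EA_PERMS permutations for the generalised (scrambled) variant:

import ghalton as gh

d = 4
# generalised (scrambled) Halton using the package's optimised permutations
gen_seq = gh.GeneralizedHalton(gh.EA_PERMS[:d])
# plain Halton sequence in the same dimension
plain_seq = gh.Halton(d)

print(gen_seq.get(3))
print(plain_seq.get(3))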
Code Example #15
def gen_square(xl, yl, xsize, ysize, r):
    seq = ghalton.Halton(2)
    uniform = np.array(seq.get(100))

    lo = np.array([xl, yl]) + (r + 2.0)
    hi = np.array([xl, yl]) + np.array([xsize / 2.0, ysize / 2.0])

    pts = lo + uniform * (hi - lo)

    plt.scatter(pts[:50, 0], pts[:50, 1])
    plt.show()

    return pts
Code Example #16
def datagenIterator(range_dict, num_configs=10, best_config={}):
    best_val_loss = float('inf')  # np.float was removed in NumPy 1.24
    sequencer = ghalton.Halton(len(range_dict))
    config = dict(best_config)
    for key, max_value in range_dict.items():  # iteritems() is Python 2 only
        points = np.sort(np.asarray(sequencer.get(num_configs)))
        if key == 'channel_shift_range' or key == 'rotation_range':
            values = np.int32(np.floor(points * (max_value + 1)))
        else:
            values = points * max_value
        for value in values:
            config.update({key:value})
            yield config
Code Example #17
File: storage.py  Project: maxc01/addtree
    def optimize(self, kernel, n_restart=1, verbose=False, quasi=False):
        gp = george.GP(kernel, mean=self.Y.mean())
        gp.compute(self.X, self.Yerr)

        if verbose:
            print("Initial ln-likelihood: {0:.2f}".format(
                gp.log_likelihood(self.Y)))

        def _neg_ln_like(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(self.Y)

        def _grad_neg_ln_like(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(self.Y)

        bounds = kernel.get_parameter_bounds()
        x_best = None
        y_best = np.inf
        if quasi:
            import ghalton
            sequencer = ghalton.Halton(len(bounds))
            seeds = sequencer.get(n_restart)
        else:
            seeds = np.random.uniform(*zip(*bounds),
                                      size=(n_restart, len(bounds)))
        for i in range(n_restart):
            result = minimize(
                _neg_ln_like,
                x0=seeds[i],
                jac=_grad_neg_ln_like,
                bounds=bounds,
                method="L-BFGS-B",
            )
            if result.success is False:
                warnings.warn(
                    "Gaussian Process optimization is not successful.")
            if result.fun < y_best:
                y_best = result.fun
                x_best = result.x

        if x_best is None:
            raise RuntimeError("All optimizations are not successful.")

        gp.set_parameter_vector(x_best)
        if verbose:
            print("Best parameter of kernel: {}".format(x_best))
            print("\nFinal ln-likelihood: {0:.2f}".format(
                gp.log_likelihood(self.Y)))

        return gp
Code Example #18
File: tta.py  Project: leiqinga/dachuang1
def build_quasirandom_transforms(num_transforms,
                                 color_sigma,
                                 zoom_range,
                                 rotation_range,
                                 shear_range,
                                 translation_range,
                                 do_flip=True,
                                 allow_stretch=False,
                                 skip=0):
    gen = ghalton.Halton(10)
    uniform_samples = np.array(gen.get(num_transforms + skip))[skip:]

    tfs = []
    for s in uniform_samples:
        rotation = uniform(s[0], *rotation_range)
        shift_x = uniform(s[1], *translation_range)
        shift_y = uniform(s[2], *translation_range)
        translation = (shift_x, shift_y)

        # setting shear last because we're not using it at the moment
        shear = uniform(s[9], *shear_range)

        if do_flip:
            flip = bernoulli(s[8], p=0.5)
        else:
            flip = False

        log_zoom_range = [np.log(z) for z in zoom_range]
        if isinstance(allow_stretch, float):
            log_stretch_range = [-np.log(allow_stretch), np.log(allow_stretch)]
            zoom = np.exp(uniform(s[6], *log_zoom_range))
            stretch = np.exp(uniform(s[7], *log_stretch_range))
            zoom_x = zoom * stretch
            zoom_y = zoom / stretch
        elif allow_stretch is True:  # avoid bugs, e.g. when it is an integer
            zoom_x = np.exp(uniform(s[6], *log_zoom_range))
            zoom_y = np.exp(uniform(s[7], *log_zoom_range))
        else:
            zoom_x = zoom_y = np.exp(uniform(s[6], *log_zoom_range))
        # the range should be multiplicatively symmetric, so [1/1.1, 1.1] instead of [0.9, 1.1] makes more sense.

        tfs.append(
            data.build_augmentation_transform((zoom_x, zoom_y), rotation,
                                              shear, translation, flip))

    color_vecs = [
        normal(s[3:6], avg=0.0, std=color_sigma) for s in uniform_samples
    ]

    return tfs, color_vecs
Code Example #19
File: mixed_logit.py  Project: ajgara/choice-models
    def __init__(self, products, mus, sigmas):
        super(MixedLogitModel, self).__init__(products)
        if len(mus) != len(products):
            raise Exception('Number of mus must equal the number of products.')
        if len(sigmas) != len(products):
            raise Exception('Number of sigmas must equal the number of products.')
        self.products = products
        self.mus = mus
        self.sigmas = sigmas

        self.NUMBER_SAMPLES = 1000
        self.random_numbers = ghalton.Halton(len(self.products)).get(
            self.NUMBER_SAMPLES)
        self.random_numbers = norm.ppf(self.random_numbers, loc=0.0, scale=1.0)
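The constructor above draws its simulation noise by pushing a Halton sequence through the inverse normal CDF. A standalone sketch of that transform, assuming only ghalton, numpy, and scipy:

import ghalton
import numpy as np
from scipy.stats import norm

n_products, n_samples = 3, 1000

# Halton points in the unit cube, shape (n_samples, n_products)
u = np.array(ghalton.Halton(n_products).get(n_samples))

# inverse-CDF transform: quasi-random standard-normal draws
z = norm.ppf(u, loc=0.0, scale=1.0)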
Code Example #20
def lrIterator(num_configs=50, initial_range=(-4, -1), decay_range=(0, 0.01)):
    sequencer = ghalton.Halton(2)
    all_points = np.asarray(sequencer.get(num_configs))

    initial_start = initial_range[0]
    initial_stop = initial_range[1]
    decay_start = decay_range[0]
    decay_stop = decay_range[1]
    for points in all_points:
        initial = np.power(
            10, initial_start + (points[0] * (initial_stop - initial_start)))
        decay = decay_start + (points[1] * (decay_stop - decay_start))
        config = LRConfig(initial=initial, decay=decay)
        yield config
Code Example #21
 def rand(self, n, obs_dim, quasi=False):
     if quasi:
         import ghalton
         a = -np.ones((n, obs_dim))
         for node in self.path:
             sequencer = ghalton.Halton(node.parameter.dim)
             a[:, node.bfs_index] = node.local_id
             a[:, node.param_axes] = sequencer.get(n)
     else:
         a = -np.random.rand(n, obs_dim)
         for node in self.path:
             a[:, node.bfs_index] = node.local_id
             a[:, node.param_axes] *= -1
     return a
Code Example #22
def calc_grid_v2(cell_resolution, max_min, method='grid', X=None, M=None):
    """
    :param cell_resolution: resolution to hinge RBFs as (x_resolution, y_resolution)
    :param max_min: realm of the RBF field as (x_min, x_max, y_min, y_max)
    :param X: a sample of lidar locations
    :return: numpy array of size (# of RNFs, 2) with grid locations
    """
    if max_min is None:
        # if 'max_min' is not given, derive the boundary from X
        # (assume 'X' contains samples from the entire area)
        expansion_coef = 1.2
        x_min, x_max = expansion_coef * X[:, 0].min(
        ), expansion_coef * X[:, 0].max()
        y_min, y_max = expansion_coef * X[:, 1].min(
        ), expansion_coef * X[:, 1].max()
    else:
        x_min, x_max = max_min[0], max_min[1]
        y_min, y_max = max_min[2], max_min[3]

    if method == 'grid':  # on a regular grid
        xvals = np.arange(x_min, x_max, cell_resolution[0])
        yvals = np.arange(y_min, y_max, cell_resolution[1])
        xx, yy = np.meshgrid(xvals, yvals)
        grid = np.hstack((xx.ravel()[:, np.newaxis], yy.ravel()[:,
                                                                np.newaxis]))
    else:  # sampling
        D = 2
        if M is None:
            # np.int was removed in NumPy 1.24; use the builtin int instead
            xsize = int((x_max - x_min) / cell_resolution[0])
            ysize = int((y_max - y_min) / cell_resolution[1])
            M = xsize * ysize
        if method == 'mc':
            grid = np.random.uniform(0, 1, (M, D))
        elif method == 'halton':
            grid = np.array(gh.Halton(D).get(M))
        elif method == 'ghalton':
            grid = np.array(gh.GeneralizedHalton(gh.EA_PERMS[:D]).get(int(M)))
        elif method == 'sobol':
            grid = sobol_gen(D, M, 7)
        else:
            grid = None

        grid[:, 0] = x_min + (x_max - x_min) * grid[:, 0]
        grid[:, 1] = y_min + (y_max - y_min) * grid[:, 1]

    return grid
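A brief usage sketch of the sampling branch (it assumes gh refers to the ghalton package at module level, as the 'halton' and 'ghalton' branches imply):

# hinge points over [0, 10] x [0, 5] at 0.5 resolution, drawn from a Halton sequence
hinge_points = calc_grid_v2(cell_resolution=(0.5, 0.5),
                            max_min=(0.0, 10.0, 0.0, 5.0),
                            method='halton')
print(hinge_points.shape)  # (200, 2)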
Code Example #23
def gen_circle(R, center, r):
    seq = ghalton.Halton(2)
    uniform = np.array(seq.get(200))

    circle = np.empty([0, 2])
    for pt in uniform:
        if np.dot(pt - 0.5, pt - 0.5) < 0.25:
            circle = np.vstack((circle, 2 * pt - 1))

    pts = circle * (R - r - 2) + center
    pts = pts[0:100]

    fig, ax = plt.subplots()
    ax.add_artist(plt.Circle([center, center], R - r, ec='C1', fc='none'))
    plt.scatter(pts[:, 0], pts[:, 1])
    plt.show()

    return pts
Code Example #24
def paramsTable(keys, maxs, mins, num=2000, thread=4, large=1e5):
    """
   :param dim: type integer, number of parameters
   :param num: type integer, number of sampling points for Monte Carlo Simulation
   :param thread: type integer, number of thread for each parallel simulation
   :param maxs: type tuples, maximums ranges of parameters
   :param mins: type tuples, minimums ranges of parameters
   :param keys: type strings, names of parameters
   :param large: type float, generate halton number on the powers if above this value
	"""
    dim = len(keys)
    sequencer = ghalton.Halton(dim)
    table = sequencer.get(num)
    for i in range(dim):
        for j in range(num):
            # for other parameters of small values
            if mins[i] < large:
                mean = .5 * (maxs[i] + mins[i])
                std = .5 * (maxs[i] - mins[i])
                table[j][i] = mean + (table[j][i] - .5) * 2 * std
            # for parameters of large values like Young's modulus, use halton numbers on the powers
            if mins[i] >= large:
                powMax = log10(maxs[i])
                powMin = log10(mins[i])
                meanPow = .5 * (powMax + powMin)
                stdPow = .5 * (powMax - powMin)
                power = meanPow + (table[j][i] - .5) * 2 * stdPow
                table[j][i] = 10**power
    # output parameter table with thread number for each Yade simulation session
    fout = open('table.dat', 'w')
    fout.write(' '.join(['!OMP_NUM_THREADS', 'key'] + keys + ['\n']))
    for j in range(num):
        fout.write(' '.join(['%2i' % thread, '%9i' % j] +
                            ['%15.5e' % table[j][i]
                             for i in range(dim)] + ['\n']))
    fout.close()

    # prepare parameter table for the particle-filter calibration
    fout = open('particle.txt', 'w')
    for j in range(num):
        for i in range(dim):
            fout.write('%15.5e' % table[j][i])
        fout.write('\n')
    fout.close()
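The two mappings described in the docstring, an affine map for ordinary ranges and a powers-of-ten map for very large ones, can be illustrated in isolation. A short sketch with hypothetical bounds:

import numpy as np
import ghalton

u = np.array(ghalton.Halton(2).get(5))  # 5 samples of 2 parameters in [0, 1)

# ordinary parameter: affine map of the Halton coordinate onto [lo, hi]
lo, hi = 0.1, 0.9
p0 = 0.5 * (hi + lo) + (u[:, 0] - 0.5) * (hi - lo)

# large parameter (e.g. Young's modulus, 1e8 .. 1e11): map on the powers of ten
pow_lo, pow_hi = np.log10(1e8), np.log10(1e11)
p1 = 10.0 ** (0.5 * (pow_hi + pow_lo) + (u[:, 1] - 0.5) * (pow_hi - pow_lo))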
Code Example #25
 def generate(self,cols,ranges,num): 
     global halton_method
     if halton_method == 'ghalton':
         # use the ghalton package
         sequencer = ghalton.Halton(len(cols))
         sequencer.seed(self.seed)
         sequencer.get(self.tot_num)
         seq = sequencer.get(num)
         self.tot_num += num
         low = np.asarray([ranges[col][0] for col in cols])
         high = np.asarray([ranges[col][1] for col in cols])
         return low + (high - low) * seq
     elif halton_method == 'halton_qmc':
         # use the halton_qmc module
         seq = halton_sequence(self.tot_num+1, self.tot_num+num, len(cols)).reshape(num,-1)
         self.tot_num += num
         low = np.asarray([ranges[col][0] for col in cols])
         high = np.asarray([ranges[col][1] for col in cols])
         return low + (high - low) * seq
Code Example #26
File: utils.py  Project: cliffckerr/kelfi
def halton_sequence(n_points=1, n_dims=1):
    """
    Generate Quasi Monte Carlo samples using the Halton sequence.
    
    Parameters
    ----------
    n_points : int
        The number of data points to generate
    n_dims : int
        The number of dimensions to generate the samples in

    Returns
    -------
    numpy.ndarray
        A dataset of size (n_points, n_dims)
    """
    sequencer = gh.Halton(n_dims)
    points = np.array(sequencer.get(n_points))
    return points
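A short usage sketch (assuming the module imports ghalton as gh, as the function body implies):

samples = halton_sequence(n_points=100, n_dims=3)
print(samples.shape)  # (100, 3), each coordinate in (0, 1)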
Code Example #27
def simulate(nps, num_simulations, number_of_materials,
             num_uniform_simulations, include_first_wall, outputfile):
    breeder_material_names = ['Li', 'Li4SiO4', 'Li2TiO3']
    results = []
    if num_uniform_simulations != 0:
        for i in tqdm(range(0, num_uniform_simulations + 1)):

            enrichment_fractions_simulation = []
            breeder_material_name = random.choice(breeder_material_names)

            for j in range(0, number_of_materials):
                enrichment_fractions_simulation.append(
                    (1.0 / num_uniform_simulations) * i)

            inner_radius = 500
            thickness = 100

            result = find_tbr_dict(enrichment_fractions_simulation,
                                   breeder_material_name, include_first_wall,
                                   nps)
            results.append(result)

        print('finished uniform blanket simulations')

    sequencer = ghalton.Halton(number_of_materials)
    for i in tqdm(range(0, num_simulations)):
        os.system('rm *.h5')
        enrichment_fractions_simulation = []
        breeder_material_name = random.choice(breeder_material_names)

        enrichment_fractions_simulation = sequencer.get(1)[0]

        inner_radius = 500
        thickness = 100

        result = find_tbr_dict(enrichment_fractions_simulation,
                               breeder_material_name, include_first_wall, nps)
        results.append(result)

    with open(outputfile, 'w') as file_object:
        json.dump(results, file_object, indent=2)
Code Example #28
def initParamsTable(keys,
                    maxs,
                    mins,
                    num=100,
                    threads=4,
                    tableName='smcTable0.txt',
                    simNum=0):
    """
    Generate initial parameter samples using a halton sequence
    and write the samples into a text file

    :param keys: list of strings, names of parameters

    :param maxs: list of floats, upper bounds of parameter values

    :param mins: list of floats, lower bounds of parameter values

    :param num: int, default=100, number of samples for Sequential Monte Carlo

    :param threads: int, default=4, number of threads for each model evaluation

    :param tableName: string, Name of the parameter table

    :return:
        table: ndarray of shape (num, len(keys)), initial parameter samples

        tableName: string, default='smcTable.txt'
    """
    print(tableName)
    dim = len(keys)
    sequencer = ghalton.Halton(dim)
    table = sequencer.get(num)
    for i in range(dim):
        for j in range(num):
            mean = .5 * (maxs[i] + mins[i])
            std = .5 * (maxs[i] - mins[i])
            table[j][i] = mean + (table[j][i] - .5) * 2 * std
    # write parameters in the format for Yade batch mode
    writeToTable(tableName, table, dim, num, threads, keys, simNum)
    return np.array(table), tableName
Code Example #29
File: generate.py  Project: thetianshuhuang/bmcc_sim
def make_phase_3():

    for k in [5, 10, 15, 20, 30, 40, 50, 60, 70, 80, 100]:

        dst = get_dirname(3, 50 * k, k, 1.0, makedir=True)

        print(dst)
        for _ in tqdm(range(100)):
            # means = [
            #     (40 * k)**(1/3) * np.random.uniform(
            #         low=-0.5, high=0.5, size=3)
            #     for _ in range(k)
            # ]
            means = [
                (40 * k)**(1 / 3) * np.array(x)
                for x in ghalton.Halton(3).get(k)
            ]

            ds = bmcc.GaussianMixture(
                n=50 * k, k=k, d=3, r=1.0, df=3,
                symmetric=False, shuffle=False, means=means)
            ds.save(os.path.join(dst, str(uuid.uuid4())))
Code Example #30
 def init_sequence(self):
     if self.sequence_type == QMC_SEQUENCE.HALTON:
         if self.scramble_type == QMC_SCRAMBLING.OWEN17:
             self.points = tf.Variable(
                 initial_value=tfp.mcmc.sample_halton_sequence(dim=self.D,
                                                               num_results=self.N,
                                                               dtype=self.tfdt,
                                                               randomized=True,
                                                               seed=self.seed),
                 dtype=self.tfdt,
                 trainable=False)
         elif self.scramble_type == QMC_SCRAMBLING.GENERALISED:
             if self.qmckwargs[QMC_KWARG.PERM] is None:
                 perm = gh.EA_PERMS[:self.D]  # Default permutation
             else:
                 perm = self.qmckwargs[QMC_KWARG.PERM]
             self.sequencer = gh.GeneralizedHalton(perm)
             self.points = tf.constant(
                 np.array(self.sequencer.get(int(self.N))), dtype=self.tfdt)
         else:
             self.sequencer = gh.Halton(int(self.D))
             self.points = tf.constant(
                 np.array(self.sequencer.get(int(self.N))), dtype=self.tfdt)