Esempio n. 1
0
def hit_or_miss(n, generator_method="pseudo"):
	#int -> float (gamma hat), float (relative error), boolean (is relative error below 0.0005)
	'''
	Hit-or-miss Monte Carlo estimate of the integral of f(x) on [0, 1].

	Receives an integer n (number of points to draw) and a random
	generating method ('pseudo' for NumPy pseudo-random numbers or
	'quasi' for a 2-D generalized Halton sequence) and returns:
	  - gamma_hat: fraction of points that fell under the curve (the estimate),
	  - relative_error: standard error divided by gamma_hat,
	  - is_error_below_threshold: True when 1.65 * relative_error < 0.0005.

	Raises ValueError if n is not a positive integer.
	'''
	if n <= 0:
		raise ValueError("n must be a positive integer")
	# Only build the Halton sequencer when the quasi-random path is used.
	sequencer = None if generator_method == "pseudo" else ghalton.GeneralizedHalton(2)
	hits = 0
	for _ in range(n):
		if sequencer is None:
			x = np.random.uniform(low=0, high=1)
			y = np.random.uniform(low=0, high=1)
		else:
			x, y = sequencer.get(1)[0]
		if y <= f(x):
			hits += 1

	gamma_hat = hits / n
	# Bernoulli variance of the hit indicator.
	variance_gamma_hat = gamma_hat * (1 - gamma_hat)
	standard_error = math.sqrt(variance_gamma_hat / n)
	# Guard against division by zero when no point fell under the curve.
	relative_error = standard_error / gamma_hat if gamma_hat > 0 else float("inf")
	is_error_below_threshold = 1.65 * relative_error < 0.0005

	return gamma_hat, relative_error, is_error_below_threshold
Esempio n. 2
0
    def setRenderSamples(self, samples):
        """Set the number of anti-aliasing render passes and precompute
        the matching 2-D generalized-Halton sampling points."""
        self.render_samples = samples
        self.render_sample_num = 0  # restart the pass counter
        self.quad_fs.program['aa_passes'].value = self.render_samples

        halton_seq = ghalton.GeneralizedHalton(ghalton.EA_PERMS[:2])
        self.sampling_points = halton_seq.get(samples)
Esempio n. 3
0
def halton_generator(d):
    """Yield successive d-dimensional points of a randomly-seeded
    generalized Halton sequence as NumPy arrays."""
    import ghalton
    sequencer = ghalton.GeneralizedHalton(d, random.randint(0, 1000))
    while True:
        point = sequencer.get(1)[0]
        yield np.array(point)
Esempio n. 4
0
    def quasi_gaussian_samples(self):
        """Generate standard Gaussian samples from a 100-dimensional
        low-discrepancy generalized Halton sequence via the inverse-CDF
        (ndtri) transform.

        Returns an (n_steps, n_paths) matrix of standard normals.
        """
        total = self.n_steps * self.n_paths
        sequencer = ghalton.GeneralizedHalton(100)
        # Draw enough 100-dimensional points to cover `total` uniforms,
        # flatten, and keep exactly `total` of them.
        uniforms = np.array(sequencer.get(int(total / 100) + 1)).flatten()
        uniforms = uniforms[:total]
        return ndtri(uniforms).reshape((self.n_steps, self.n_paths))
    def _drawCoeff(self, rffKernel, m):
        """Draw m random-feature frequency vectors and scale them by the
        per-dimension length-scales (self.sigma).

        Uses inverse-CDF sampling of a generalized Halton sequence when
        self.quasiRandom is set, plain kernel sampling otherwise.
        """
        scale = self.sigma.reshape(1, len(self.sigma))
        if not self.quasiRandom:
            return rffKernel.sampleFreqs((m, self.d)) / scale
        sequencer = ghalton.GeneralizedHalton(ghalton.EA_PERMS[:self.d])
        # Discard the first point of the sequence.
        points = np.array(sequencer.get(m + 1))[1:]
        return rffKernel.invCDF(points) / scale
Esempio n. 6
0
    def __init__(self,
                 connection,
                 space,
                 clear_db=False,
                 random_state=None,
                 permutations=None,
                 skip=0):
        """Quasi-random (generalized Halton) sampler over a search space.

        Parameters:
            connection: storage backend forwarded to the parent class.
            space: search space; its length sets the sequence dimensionality.
            clear_db (bool): forwarded to the parent class.
            random_state: seed used to scramble the sequence when no
                explicit permutations are given.
            permutations: "ea" for ghalton's built-in EA permutations, or
                an explicit permutation table; takes precedence over
                random_state.
            skip (int): number of initial sequence points to skip.
        """
        super(QuasiRandom, self).__init__(connection, space, clear_db)
        self.skip = skip
        # Explicit permutations win over random_state when both are given.
        if permutations == "ea":
            self.seq = ghalton.GeneralizedHalton(
                ghalton.EA_PERMS[:len(self.space)])
        elif permutations is not None:
            self.seq = ghalton.GeneralizedHalton(permutations)
        elif random_state is not None:
            self.seq = ghalton.GeneralizedHalton(len(self.space), random_state)
        else:
            self.seq = ghalton.GeneralizedHalton(len(self.space))

        # Number of points drawn so far.
        self.drawn = 0
Esempio n. 7
0
    def generate_random_colors():
        """Endlessly yield RGBA color tuples whose hues follow a 1-D
        generalized Halton sequence, spreading them over the color wheel."""
        import colorsys
        import ghalton

        sequencer = ghalton.GeneralizedHalton(ghalton.EA_PERMS[:1])
        while True:
            hue = sequencer.get(1)[0][0]
            r, g, b = colorsys.hsv_to_rgb(hue, 1, 0.6)
            yield (r, g, b, 1.0)  # add alpha channel
Esempio n. 8
0
    def __init__(self,
                 n_frequencies,
                 dim,
                 kernel_type=list(kernel_samplers.keys())[0],
                 noise_stddev=1e-2,
                 lengthscale=1.,
                 signal_stddev=1.,
                 mean_function=None,
                 dtype=torch.float32,
                 device=None):
        """
        Constructor.

        Parameters:
            n_frequencies (int): Number of Fourier frequencies to generate

            dim (int): Dimensionality of the input domain

            kernel_type (str): String specifying which kernel to use (default: 'squared_exponential')

            noise_stddev (float or torch.Tensor): Standard deviation for the Gaussian observation noise model
            (default: 1e-2)

            lengthscale (float or torch.Tensor): Length-scale of the GP kernel (default: 1.0)

            signal_stddev (float or torch.Tensor): Signal standard deviation, i.e. a multiplicative scaling factor for
            the feature maps (default: 1.0)

            mean_function (ssgp.mean_functions.AbstractMeanFunction): GP prior mean function. If None, a zero-mean prior
            is used. (default: None)

            dtype (torch.dtype): Floating-point type used for created tensors (default: torch.float32)

            device: Torch device the model's tensors are placed on (default: None)
        """
        super().__init__()
        self.dtype = dtype
        self.device = device

        self.dim = dim
        self.n_frequencies = n_frequencies
        self.noise_stddev = self.ensure_torch(noise_stddev)

        # Quasi-random base draws: one EA permutation per input dimension.
        perm = gh.EA_PERMS[:dim]
        sequencer = gh.GeneralizedHalton(perm)
        base_freqs = self.ensure_torch(sequencer.get(int(n_frequencies)))

        # Map uniform Halton draws to spectral frequencies through the
        # sampler registered for the chosen kernel type.
        self.raw_spec = ISSGPR.kernel_samplers[kernel_type](base_freqs)
        self._set_lengthscale(lengthscale)
        self.signal_stddev = self.ensure_torch(signal_stddev)
        # Training-data accumulators; initialised by clear_data().
        self.training_mat = None
        self.training_vec = None
        self.clear_data()
        if mean_function is None:
            mean_function = mean_functions.ZeroMean(dtype=self.dtype,
                                                    device=self.device)
        self.mean_function = mean_function
 def init_sequencer(self):
     """Initialise self.sequencer for Halton-type QMC sequences.

     Uses a generalized (scrambled) Halton sequencer when the scramble
     type is GENERALISED, otherwise a plain Halton sequencer.
     """
     # ---------------------------------------#
     #                HALTON                  #
     # ---------------------------------------#
     if self.sequence_type == QMC_SEQUENCE.HALTON:
         if self.scramble_type == QMC_SCRAMBLING.GENERALISED:
             if self.qmc_kwargs[QMC_KWARG.PERM] is None:
                 perm = gh.EA_PERMS[:self.d]  # Default permutation
             else:
                 perm = self.qmc_kwargs[QMC_KWARG.PERM]
             self.sequencer = gh.GeneralizedHalton(perm)
         else:
             # Unscrambled Halton sequence of dimension d.
             self.sequencer = gh.Halton(int(self.d))
def calc_grid_v2(cell_resolution, max_min, method='grid', X=None, M=None):
    """
    Build 2-D hinge locations for RBFs, on a regular grid or by sampling.

    :param cell_resolution: resolution to hinge RBFs as (x_resolution, y_resolution)
    :param max_min: realm of the RBF field as (x_min, x_max, y_min, y_max);
        if None, bounds are derived from X (expanded by 20%)
    :param method: 'grid', 'mc', 'halton', 'ghalton' or 'sobol'
    :param X: a sample of lidar locations (used only when max_min is None)
    :param M: number of sampled points (derived from the resolution if None)
    :return: numpy array of size (# of RBFs, 2) with grid locations
    :raises ValueError: if method is not one of the supported names
    """
    if max_min is None:
        # If 'max_min' is not given, make a boundary based on X,
        # assuming 'X' contains samples from the entire area.
        expansion_coef = 1.2
        x_min, x_max = expansion_coef * X[:, 0].min(
        ), expansion_coef * X[:, 0].max()
        y_min, y_max = expansion_coef * X[:, 1].min(
        ), expansion_coef * X[:, 1].max()
    else:
        x_min, x_max = max_min[0], max_min[1]
        y_min, y_max = max_min[2], max_min[3]

    if method == 'grid':  # on a regular grid
        xvals = np.arange(x_min, x_max, cell_resolution[0])
        yvals = np.arange(y_min, y_max, cell_resolution[1])
        xx, yy = np.meshgrid(xvals, yvals)
        grid = np.hstack((xx.ravel()[:, np.newaxis], yy.ravel()[:,
                                                                np.newaxis]))
    else:  # sampling in the unit square, then rescaling to the bounds
        D = 2
        if M is None:
            # np.int was removed in NumPy 1.24; use the builtin int instead.
            M = int((x_max - x_min) / cell_resolution[0]) * int(
                (y_max - y_min) / cell_resolution[1])
        if method == 'mc':
            grid = np.random.uniform(0, 1, (M, D))
        elif method == 'halton':
            grid = np.array(gh.Halton(D).get(M))
        elif method == 'ghalton':
            grid = np.array(gh.GeneralizedHalton(gh.EA_PERMS[:D]).get(int(M)))
        elif method == 'sobol':
            grid = sobol_gen(D, M, 7)
        else:
            # Fail loudly rather than crashing later on grid[:, 0].
            raise ValueError("unknown method: %r" % (method,))

        grid[:, 0] = x_min + (x_max - x_min) * grid[:, 0]
        grid[:, 1] = y_min + (y_max - y_min) * grid[:, 1]

    return grid
Esempio n. 11
0
def generate_colors(number_required, seed):
    """
    Generate a list of length number_required of distinct "good" random colors.

    See: https://github.com/fmder/ghalton

    Based on http://martin.ankerl.com/2009/12/09/
            how-to-create-random-colors-programmatically/

    :param number_required: int
    :param seed: the random seed

    :type: int
    :type: int

    :rtype: a list of color triples; the Halton branch yields [r, g, b]
            lists and the fallback branch yields (r, g, b) float tuples,
            both with components in [0, 1]
    """
    rgb_list = []
    if HALTON is True:
        # 3-D Halton sequence: each point is directly a well-spread color.
        sequencer = ghalton.GeneralizedHalton(3, seed)
        points = sequencer.get(int(number_required))
        for p in points:
            # Fixed: Python 2 `print p` statement is a SyntaxError in Python 3.
            print(p)
            rgb_list.append(p)
    else:
        # Step the hue by the golden-ratio conjugate so consecutive
        # colors are maximally spread around the color wheel.
        golden_ratio_conjugate = 0.618033988749895
        random.seed(seed)
        h = random.random()
        for i in range(0, int(number_required)):
            h += golden_ratio_conjugate
            h %= 1
            rgb = hsv_to_rgb(h, 0.5, 0.6)
            rgb = rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0
            rgb_list.append(rgb)
    return rgb_list
 def init_sequence(self):
     """Materialise self.points (a tensor of N D-dimensional QMC points)
     for Halton-type sequences, choosing the scrambling strategy from
     self.scramble_type.
     """
     if self.sequence_type == QMC_SEQUENCE.HALTON:
         if self.scramble_type == QMC_SCRAMBLING.OWEN17:
             # TensorFlow-Probability randomized Halton sequence; stored
             # as a (non-trainable) Variable.
             self.points = tf.Variable(
                 initial_value=tfp.mcmc.sample_halton_sequence(dim=self.D,
                                                               num_results=self.N,
                                                               dtype=self.tfdt,
                                                               randomized=True,
                                                               seed=self.seed),
                 dtype=self.tfdt,
                 trainable=False)
         elif self.scramble_type == QMC_SCRAMBLING.GENERALISED:
             if self.qmckwargs[QMC_KWARG.PERM] is None:
                 perm = gh.EA_PERMS[:self.D]  # Default permutation
             else:
                 perm = self.qmckwargs[QMC_KWARG.PERM]
             self.sequencer = gh.GeneralizedHalton(perm)
             self.points = tf.constant(
                 np.array(self.sequencer.get(int(self.N))), dtype=self.tfdt)
         else:
             # Plain (unscrambled) Halton sequence.
             self.sequencer = gh.Halton(int(self.D))
             self.points = tf.constant(
                 np.array(self.sequencer.get(int(self.N))), dtype=self.tfdt)
Esempio n. 13
0
def generate_generalized_halton(n: int, d: int):
    """Return n points of a d-dimensional generalized Halton sequence
    scrambled with ghalton's built-in EA permutations."""
    perms = ghalton.EA_PERMS[:d]
    return ghalton.GeneralizedHalton(perms).get(n)
Esempio n. 14
0
# Nicholas Gialluca Domene
# Número USP 8543417
# Felipe de Moura Ferreira
# Número USP 9864702
# 23 de maio de 2021

import math
import numpy as np
from scipy.stats import beta
from scipy.stats.stats import pearsonr
import random
import datetime
import ghalton
# Fixed seed so both the NumPy PRNG and the 1-D generalized Halton
# sequencer produce reproducible draws across runs.
random_seed = 1
np.random.seed(random_seed)
sequencer = ghalton.GeneralizedHalton(1, random_seed)
'''
- As an Improvement on your 2nd Programming Exercise,
consider replacing the Pseudo Random Number Generator
by a Quasi Random Number Generator.

- Do your Monte Carlo integration routines work better?
Empirically, how faster are now your integration routines?

- You should carefully explain how and why you did
your empirical analysis and reached your conclusions.
'''

def f(x):
	cpf = 0.45361387819
	rg  = 0.384850546
Esempio n. 15
0
# GET CHANNEL FOR GS GENERATION
# Load the channel array and save it as a grayscale image for inspection.
channel = np.load(channel_path)
channel_im = np.reshape(channel,(image_size,image_size))
plt.imsave(output_dir + channel_name + "_channel.png", channel_im, cmap="gray")

#
print("channel.shape",channel.shape)
 
# Binary mask of the pixels at or below the threshold.
channel_mask = channel <= threshold
mask_im = np.reshape(channel_mask, (image_size, image_size))
plt.imsave(output_dir + channel_name + "_channel_mask_thres_" + str(threshold) + ".png", mask_im, cmap="gray")

# Generate 2D Halton points in [0,1] range
# for a given seed
# NOTE(review): the loop draws n_patches points seed+1 times, so `points`
# keeps only the last batch — presumably this advances the sequence so the
# result depends on `seed`; confirm this is intentional.
sequencer = ghalton.GeneralizedHalton(2,0)
for i in range(seed+1):
    print("sequencer i: ", i)
    points = sequencer.get(n_patches)
    
n_iter=seed

# Halton points in the range of image size
# As many Halton points are generated as there are patches.
# Iterate over all Halton points to take their first and second
# coordinates and store them in a list.
the_list = [None] * n_patches
for i in range(n_patches):
    a= int(points[i][0]*image_size)
    b= int(points[i][1]*image_size)
    the_list[i] = [a,b]
Esempio n. 16
0
def generate_points(seed, npoints, dim=2):
    """Return npoints seeded generalized-Halton points of dimension dim
    as a float32 NumPy array."""
    halton = ghalton.GeneralizedHalton(dim, seed)
    points = halton.get(npoints)
    return np.asarray(points, dtype='float32')
Esempio n. 17
0
def generate_halton_weights(d, n):
    """Draw get_D(d, n) standard-normal weight vectors of dimension d by
    inverse-CDF transforming a d-dimensional generalized Halton sequence.

    Returns (M, None) where M holds the transformed points.
    """
    num_points = get_D(d, n)
    halton_points = np.array(ghalton.GeneralizedHalton(d).get(num_points))
    return norm.ppf(halton_points), None
Esempio n. 18
0
from blackscholes import EuropeanOptionPricer

# Option parameters: spot, strike, maturity, volatility, risk-free rate.
params = {'S': 10, 'K': 9, 'T': 1, 'sigma': 0.10, 'r': 0.06}

# get benchmark
bs = EuropeanOptionPricer(**params)
Ctrue = bs.get_call_premium()

# using Quasi Monte Carlo
# Risk-neutral drift factor and per-path volatility scale for GBM at time T.
factor = params['S'] * np.exp(
    (params['r'] - 0.5 * params['sigma']**2) * params['T'])
std = params['sigma'] * math.sqrt(params['T'])

seed = 2000
seqr = ghalton.GeneralizedHalton(1, seed)

aMList = []
aMError = []

# Quasi Monte Carlo
# NOTE(review): 1e5 appears twice in this list — possibly a typo for a
# single 1e5 entry; confirm the intended sample sizes.
for M in [1e2, 1e3, 1e4, 1e5, 1e5, 1e6]:
    M = int(M)
    X = np.array(seqr.get(M))
    # Inverse-CDF transform: uniform Halton draws -> standard normals.
    Z = norm.ppf(X)
    factorArray = std * Z
    sArray = factor * np.exp(factorArray)
    # Call payoff: max(S_T - K, 0), applied elementwise.
    payoffArray = sArray - params['K']
    payoffArray[payoffArray < 0] = 0
Esempio n. 19
0
def halton_generator(d):
    """Yield successive d-dimensional points of a generalized Halton
    sequence scrambled with a random seed in [0, 1000]."""
    import ghalton
    seed = random.randint(0, 1000)
    sequencer = ghalton.GeneralizedHalton(d, seed)
    while True:
        [point] = sequencer.get(1)
        yield point
Esempio n. 20
0
def quasi_rand_num_generator(n, m, seed=1126):
    """Return an (m, n) float32 array of standard normals obtained by
    inverse-CDF transforming a seeded n-dimensional Halton sequence."""
    sequencer = ghalton.GeneralizedHalton(n, seed)
    uniforms = np.array(sequencer.get(m))
    return stats.norm.ppf(uniforms).astype(np.float32)
Esempio n. 21
0
def halton(count=1, dimensionality=2, seed=0):
    """Return `count` points of a seeded generalized Halton sequence of
    the given dimensionality as a NumPy array."""
    seq = ghalton.GeneralizedHalton(dimensionality, seed)
    return np.array(seq.get(count))
Esempio n. 22
0
def halton_sampling(no_random, no_samples):
    """Return no_samples generalized-Halton points built from the
    permutation list produced by get_perm_list(no_random)."""
    permutations = get_perm_list(no_random)
    return ghalton.GeneralizedHalton(permutations).get(no_samples)