Example #1
def fast_sample(model, n_samples, ml_estimation=False):
    draw_mu = model.forward(torch.zeros(model.input_dim))
    draw_sigma = model.forward(torch.ones(model.input_dim))
    if ml_estimation:
        draw_sigma = {
            key: 0.0 * (elem - draw_mu[key])
            for key, elem in draw_sigma.items()
        }
    else:
        draw_sigma = {
            key: elem - draw_mu[key]
            for key, elem in draw_sigma.items()
        }

    pi_mu = draw_mu['pi_unconstrained'].cpu().data.numpy()[0]
    pi_sigma = draw_sigma['pi_unconstrained'].cpu().data.numpy()[0]

    # Gumbel-max draw of a component per sample; the extra plain-Gumbel column
    # is the reference component with unconstrained logit 0.
    k = pi_mu.shape[0] + 1
    ks = np.argmax(np.hstack([
        npr.gumbel(size=[n_samples, k - 1]) + pi_mu
        + npr.randn(n_samples, pi_mu.shape[0]) * pi_sigma,
        npr.gumbel(size=[n_samples, 1]),
    ]), axis=1)
    draw_mu.pop('pi_unconstrained')
    draw_sigma.pop('pi_unconstrained')

    param_mu = {key: value.cpu().data.numpy()[0, ks]
                for key, value in draw_mu.items()}

    param_sigma = {key: value.cpu().data.numpy()[0, ks]
                   for key, value in draw_sigma.items()}
    thetas_mu = param_mu['theta_unconstrained']
    thetas_sigma = param_sigma['theta_unconstrained']
    # Bernoulli draws: squash the noise-perturbed logits with smoid() and
    # threshold against uniform noise.
    syn_app_data = 1 * (smoid(npr.randn(*thetas_sigma.shape) * thetas_sigma + thetas_mu)
                        > npr.rand(*thetas_sigma.shape))
    return syn_app_data
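
The ks draw above appends a column of plain Gumbel noise to the perturbed logits, which amounts to giving the k-th mixture component a fixed unconstrained logit of 0 (a reference category). A minimal, self-contained sketch of just that construction, with illustrative names rather than the project's own:

import numpy as np
import numpy.random as npr

n_samples, logits = 4, np.array([1.2, -0.3])   # k - 1 = 2 free logits, so k = 3 components
ks = np.argmax(np.hstack([
    npr.gumbel(size=[n_samples, logits.shape[0]]) + logits,  # perturbed free logits
    npr.gumbel(size=[n_samples, 1]),                         # reference component, logit 0
]), axis=1)
print(ks)  # a component index in {0, 1, 2} for each sample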
Example #2
def fast_sample(model, variable_types, n_samples):
    draw_mu = model.forward(torch.zeros(model.input_dim))
    draw_sigma = model.forward(torch.ones(model.input_dim))
    draw_sigma = {key: elem - draw_mu[key] for key, elem in draw_sigma.items()}

    sample_data = pd.DataFrame()
    variable_types_copy = variable_types.copy()
    if 'pi_unconstrained' in variable_types.keys():
        pi_mu = draw_mu['pi_unconstrained'].cpu().data.numpy()[0]
        pi_sigma = draw_sigma['pi_unconstrained'].cpu().data.numpy()[0]

        k = pi_mu.shape[0] + 1
        ks = np.argmax(np.hstack([
            npr.gumbel(size=[n_samples, k - 1]) + pi_mu
            + npr.randn(n_samples, pi_mu.shape[0]) * pi_sigma,
            npr.gumbel(size=[n_samples, 1]),
        ]), axis=1)

        draw_mu.pop('pi_unconstrained')
        draw_sigma.pop('pi_unconstrained')
        variable_types_copy.pop('pi_unconstrained')

    else:
        k = draw_mu['Target'].shape[1]
        ks = npr.randint(k, size=n_samples)

    cont_features = [
        key for key, value in variable_types_copy.items() if value == 'Beta'
    ]
    param_mu = {key: value.cpu().data.numpy()[0, ks] if key not in cont_features
                else value.cpu().data.numpy()[0, :, ks]
                for key, value in draw_mu.items()}

    param_sigma = {key: value.cpu().data.numpy()[0, ks] if key not in cont_features
                   else value.cpu().data.numpy()[0, :, ks]
                   for key, value in draw_sigma.items()}
    for key, dist in variable_types_copy.items():
        if dist == 'Categorical':
            d = param_mu[key].shape[1]
            sample_data[key] = np.argmax(
                npr.gumbel(size=[n_samples, d]) + param_mu[key]
                + npr.randn(n_samples, d) * param_sigma[key], axis=1)

        elif dist == 'Bernoulli':
            sample_data[key] = 1 * (
                smoid(param_mu[key] + npr.randn(n_samples) * param_sigma[key])
                > npr.rand(n_samples))

        elif dist == 'Beta':
            a, b = np.exp(
                (param_mu[key] + npr.randn(n_samples, 2) * param_sigma[key]).T)
            sample_data[key] = npr.beta(a, b)

    return sample_data
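
For the 'Beta' features above, the two unconstrained parameters are perturbed with Gaussian noise and exponentiated to obtain valid shape parameters before drawing from npr.beta. A minimal sketch of that step with made-up numbers (mu and sigma here are illustrative, not taken from a fitted model):

import numpy as np
import numpy.random as npr

n_samples = 5
mu = np.array([0.2, 1.0])      # unconstrained (log-scale) Beta parameters
sigma = np.array([0.1, 0.05])  # their noise scales
a, b = np.exp(mu + npr.randn(n_samples, 2) * sigma).T
samples = npr.beta(a, b)       # one Beta draw per synthetic record
print(samples)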
Example #3
def width_gumbel(max_width, num_profiles, num_samples):
    """Gumbel width distribution

    Generates scales in U[0, max_width]
    """
    scales = rand.uniform(0, max_width, num_profiles)
    return rand.gumbel(0, scales, (num_samples, num_profiles)).T
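
A usage sketch, assuming rand refers to numpy.random (as the calls suggest): each row of the result holds the samples for one profile.

import numpy.random as rand

widths = width_gumbel(max_width=10.0, num_profiles=4, num_samples=1000)
print(widths.shape)  # (4, 1000): one row of Gumbel(0, scale_i) draws per profile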
Example #4
    def computeAllIndex(self):
        """ Compute the current indexes for all arms, in a vectorized manner."""
        beta_t = np.sqrt(self.C**2 / self.pulls)
        z_t = rn.gumbel(0, 1, self.nbArms)  # vector samples
        indexes = (self.rewards / self.pulls) + beta_t * z_t
        indexes[self.pulls < 1] = float('+inf')
        self.index[:] = indexes
Example #5
def time_to_mutation_rate(tree):
    if not hasattr(GC, "NUMPY_SEEDED"):
        from numpy.random import seed as numpy_seed
        numpy_seed(seed=GC.random_number_seed)
        GC.random_number_seed += 1
        GC.NUMPY_SEEDED = True
    t = read_tree_newick(tree)
    for node in t.traverse_preorder():
        if node.edge_length is not None:
            node.edge_length *= gumbel(loc=GC.tree_rate_loc, scale=GC.tree_rate_scale)
    return str(t)
Example #6
def gumbel_noise(scale, samples, flip_prob=0.5):
    """Generate random noise according to a Gumbel distribution.

    Gumbel distributions are skewed, so the sign of the noise is flipped with
    probability flip_prob (0.5 by default), making a positive or negative skew
    equally likely.
    """
    # loc = -0.5772 * scale offsets the Gumbel mean (loc + gamma * scale, where
    # gamma ~ 0.5772 is the Euler-Mascheroni constant) so the noise is centred at zero.
    location = -0.5772 * scale
    multiplier = -1 if (U(0, 1) < flip_prob) else 1
    return multiplier * gumbel(location, scale, samples)
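
A quick, self-contained check of the centring, assuming U and gumbel are numpy.random.uniform and numpy.random.gumbel (which is how they are used above):

import numpy as np
from numpy.random import uniform as U, gumbel

noise = gumbel_noise(scale=2.0, samples=1_000_000)
print(np.mean(noise))  # close to 0: the -0.5772 * scale offset cancels the Gumbel mean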
Example #7
File: Methods.py  Project: Mv77/DDCex
def sim_dataset(theta, R, nmachines, n_per_machine, beta):

    # First solve the choice-specific value functions for the given parameters
    V0 = np.zeros((5, 2))
    tol = 1e-6  # Tolerance

    V = findFX(V0, theta, R, beta, tol, disp=False)

    data = pd.DataFrame(np.zeros((nmachines * n_per_machine, 4)),
                        columns=['Id', 'T', 'a', 'i'])

    ind = 0
    for m in range(nmachines):

        # Initialize state
        a_next = rnd.randint(5) + 1

        for t in range(n_per_machine):

            a = a_next

            # Assign id and time
            data.loc[ind, 'Id'] = m
            data.loc[ind, 'T'] = t

            data.loc[ind, 'a'] = a

            u_replace = V[a - 1][1] + rnd.gumbel()
            u_not = V[a - 1][0] + rnd.gumbel()

            if u_replace < u_not:
                data.loc[ind, 'i'] = 0
                a_next = min(5, a + 1)
            else:
                data.loc[ind, 'i'] = 1
                a_next = 1

            ind = ind + 1

    return data
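
Because the utility shocks are i.i.d. Gumbel, the simulated replacement decision follows the standard logit choice probability, which gives a quick sanity check on the simulator. A minimal sketch with made-up values (v_keep and v_replace are illustrative, not output of findFX):

import numpy as np
import numpy.random as rnd

v_keep, v_replace = 1.0, 0.4  # illustrative choice-specific values
n = 200_000
u_keep = v_keep + rnd.gumbel(size=n)
u_replace = v_replace + rnd.gumbel(size=n)

simulated = np.mean(u_replace > u_keep)
closed_form = np.exp(v_replace) / (np.exp(v_keep) + np.exp(v_replace))
print(simulated, closed_form)  # the two should agree closely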
Example #8
def stochastic_beam_k(spec, blocks, bigram_gen, k):
    bg = bigram_gen.gen_bigram(spec, blocks)
    bg_p = BigramPolicy(bg)
    # each element of the fringe is a triple:
    #   g_phi_s = the Gumbel-perturbed log-probability of the partial sequence
    #   phi_s   = the log-probability of the partial sequence
    #   s       = the partial sequence itself
    fringe = [(0, 0, (-1, ))]
    for i in range(5):
        new_fringe = []
        for g_phi_s, phi_s, s in fringe:
            z = float('-inf')
            nxt_probs = bg_p.bigram[s[-1]]
            # candidate next sequences
            ss = []
            # their log-probabilities
            phi_ss = []
            # their Gumbel-perturbed log-probabilities
            g_phi_ss = []
            # the same values shifted so that their maximum equals g_phi_s
            ghat_phi_ss = []
            for next_action, next_prob in enumerate(nxt_probs):
                ss.append(s + (next_action, ))
                item_phi_ss = phi_s + np.log(next_prob)
                phi_ss.append(item_phi_ss)
                item_g_phi_ss = gumbel(item_phi_ss)
                z = max(z, item_g_phi_ss)
                g_phi_ss.append(item_g_phi_ss)

            # shift each child's Gumbel so that the maximum over children equals
            # the parent's perturbed log-probability (conditional Gumbel-max)
            for j in range(len(nxt_probs)):
                item_ghat_phi_ss = -np.log(
                    np.exp(-g_phi_s) - np.exp(-z) + np.exp(-g_phi_ss[j]))
                ghat_phi_ss.append(item_ghat_phi_ss)

            for xx in zip(ghat_phi_ss, phi_ss, ss):
                new_fringe.append(xx)

        fringe = list(reversed(sorted(new_fringe)))[:k]

    for x in fringe:
        seq = x[2][1:]
        rec_spec = get_spec(drop_blocks(blocks, seq))
        gots = np.all(rec_spec == spec)
        if gots:
            return True
    return False
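
The ghat update above appears to be the conditional-Gumbel shift used in stochastic beam search: given a parent score T and children's independently Gumbel-perturbed log-probabilities G with maximum Z, the values -log(exp(-T) - exp(-Z) + exp(-G)) have maximum exactly T, so scores stay consistent across depths. A small, self-contained check of that property (names here are illustrative):

import numpy as np
import numpy.random as npr

def conditional_gumbel_shift(T, G):
    """Shift Gumbel-perturbed scores G so their maximum equals T."""
    Z = np.max(G)
    return -np.log(np.exp(-T) - np.exp(-Z) + np.exp(-G))

log_probs = np.log([0.2, 0.5, 0.3])
G = log_probs + npr.gumbel(size=3)  # children's perturbed log-probabilities
T = -0.1                            # parent's perturbed log-probability
shifted = conditional_gumbel_shift(T, G)
print(np.max(shifted), T)           # the maxima coincide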
Example #9
    def computeIndex(self, arm):
        r""" Take a random index, at time t and after :math:`N_k(t)` pulls of arm k:

        .. math::

           I_k(t) &= \frac{X_k(t)}{N_k(t)} + \beta_k(t) Z_k(t), \\
           \text{where}\;\; \beta_k(t) &:= \sqrt{C^2 / N_k(t)}, \\
           \text{and}\;\; Z_k(t) &\sim \mathrm{Gumbel}(0, 1).

        Where :math:`\mathrm{Gumbel}(0, 1)` is the standard Gumbel distribution.
        See [Numpy documentation](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.gumbel.html#numpy.random.gumbel) or [Wikipedia page](https://en.wikipedia.org/wiki/Gumbel_distribution) for more details.
        """
        if self.pulls[arm] < 1:
            return float('+inf')
        else:
            beta_k_t = np.sqrt(self.C**2 / self.pulls[arm])
            z_k_t = rn.gumbel(0, 1)
            return (self.rewards[arm] / self.pulls[arm]) + beta_k_t * z_k_t
Example #10
def gumbel_max_sample(x):
    """Draw an index with probability proportional to exp(x) (Gumbel-max trick)."""
    z = gumbel(loc=0, scale=1, size=x.shape)
    return (x + z).argmax()
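
A quick empirical check, assuming gumbel here is numpy.random.gumbel (as the keyword arguments suggest): the draw frequencies converge to softmax(x).

import numpy as np
from numpy.random import gumbel

x = np.array([2.0, 1.0, 0.1])
draws = np.array([gumbel_max_sample(x) for _ in range(50_000)])
freq = np.bincount(draws, minlength=x.size) / draws.size
softmax = np.exp(x) / np.exp(x).sum()
print(freq, softmax)  # the two vectors should be close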
Example #11
    b = nr.poisson(1.5, times)

    timecost.append([mid_time - start_time, time.time() - mid_time])

    # weibull
    start_time = time.time()
    a = dsg.weibull(2, 1, times)
    mid_time = time.time()
    b = nr.weibull(2, times)
    timecost.append([mid_time - start_time, time.time() - mid_time])

    # gumbel
    start_time = time.time()
    a = dsg.gumbel(2, 3, times)
    mid_time = time.time()
    b = nr.gumbel(2, 3, times)

    timecost.append([mid_time - start_time, time.time() - mid_time])

    # dirichlet
    start_time = time.time()
    a = dsg.dirichlet([1, 2, 3, 4, 5], times)
    mid_time = time.time()
    b = nr.dirichlet([1, 2, 3, 4, 5], times)

    timecost.append([mid_time - start_time, time.time() - mid_time])

    # multinomial
    start_time = time.time()
    a = dsg.multinomial(2, [0.2, 0.3, 0.5], 1000000)
    mid_time = time.time()
Example #12
File: utils.py  Project: kunlegiwa/MANGO
def gumbel(size, params):
    try:
        return random.gumbel(params['loc'], params['scale'], size)
    except ValueError as e:
        exit(e)
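
A usage sketch, assuming random here is numpy.random (whose gumbel signature matches the call):

import numpy.random as random

samples = gumbel(size=1000, params={'loc': 0.0, 'scale': 2.0})
print(samples.mean())  # roughly loc + 0.5772 * scale = 1.15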