Example #1
import numpy as np
from scipy.stats import bernoulli, beta, gamma

def sample_hyperparameters(state):
    # http://bit.ly/1baZ3zf
    T = state['T']
    num_samples = 10  # "R" in the original C-style loop below
    aalpha = 5
    balpha = 0.1
    abeta = 0.1
    bbeta = 0.1
    bgamma = 0.1  # Gamma prior rate for gamma
    agamma = 5  # Gamma prior shape for gamma

    # ported from a C-style loop: for (int r = 0; r < R; r++)
    for r in range(num_samples):
        # gamma: root level (Escobar+West95) with n = T
        eta = beta(state['gamma'] + 1, T).rvs()
        bloge = bgamma - np.log(eta)
        K = state['num_topics']
        pie = 1. / (1. + (T * bloge / (agamma + K - 1)))
        u = bernoulli(pie).rvs()
        state['gamma'] = gamma(agamma + K - 1 + u, scale=1. / bloge).rvs()

        # alpha: document level (Teh+06)
        qs = 0.
        qw = 0.

        for m, doc in enumerate(state['docs']):
            qs += bernoulli(len(doc) * 1. / (len(doc) + state['alpha'])).rvs()
            qw += np.log(beta(state['alpha'] + 1, len(doc)).rvs())
        state['alpha'] = gamma(aalpha + T - qs, scale=1. / (balpha - qw)).rvs()

    state = update_beta(state, abeta, bbeta)
    return state
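# A hedged usage sketch for the sampler above: the state keys are inferred
# from the function body, and update_beta is assumed to be defined alongside
# it. All values are placeholders.
state = {
    'T': 50,                      # total number of tables in the HDP
    'num_topics': 10,
    'gamma': 1.0,                 # top-level concentration
    'alpha': 1.0,                 # document-level concentration
    'docs': [[0, 1, 2, 3]] * 25,  # token lists per document
}
# state = sample_hyperparameters(state)  # needs update_beta in scope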
    def test_entropy(self):
        # Simple tests of entropy.
        b = stats.bernoulli(0.25)
        expected_h = -0.25*np.log(0.25) - 0.75*np.log(0.75)
        h = b.entropy()
        assert_allclose(h, expected_h)

        b = stats.bernoulli(0.0)
        h = b.entropy()
        assert_equal(h, 0.0)

        b = stats.bernoulli(1.0)
        h = b.entropy()
        assert_equal(h, 0.0)
	def connectivityMatrixNew(self):

		self.patterns =np.random.normal(0,1, size=(self.p,self.N))
		mybin=np.random.binomial(1,0.5,size=(self.p,self.N))
		#self.patterns =np.multiply(mybin,np.random.normal(-1,1, size=(self.p,self.N)))+np.multiply(1-mybin,np.random.normal(1,1,size=(self.p,self.N)))
		#mu1=0.0
		#sigma1=1.0
		#self.patterns =np.random.lognormal(mu1,sigma1, size=(self.p,self.N))-np.exp(mu1+(sigma1**2)/2.)
		print('Patterns created. N patterns:', self.p)
		patterns_pre=self.patterns
		patterns_post=self.patterns
		
		#creating  connectivity with sparse matrices
		rv=bernoulli(1).rvs
		#connectivity=sparse.csr_matrix(sparse.random(self.N,self.N,density=self.c,data_rvs=rv))
		indexes=sparse.find(sparse.random(self.N,self.N,density=self.c,data_rvs=rv))
		print('Connectivity created. N patterns:', self.p)
		
		#finding the non zero entries
		#index_row=sparse.find(connectivity)[0]
		#index_col=sparse.find(connectivity)[1]
		
		# smart way to write down the outer product learning
		connectivity=(self.Amp/(self.c*self.N))*np.einsum('ij,ij->j',patterns_post[:,indexes[0]],patterns_pre[:,indexes[1]])
		connectivity=sparse.csr_matrix((connectivity,(indexes[0],indexes[1])),shape=(self.N,self.N))
		print('Connectivity loaded with patterns. N patterns:', self.p)
		self.connectivity=connectivity
Example #4
def sample(hyperparameters, rho, K, F):
    "Sample from the model."
    G = len(rho)
    
    q_alpha = GammaDist(hyperparameters.a_alpha, hyperparameters.b_alpha)
    alpha = q_alpha.sample()
    
    q_beta = GammaDist(hyperparameters.a_beta, hyperparameters.b_beta)
    beta = q_beta.sample()

    q_gamma = GammaDist(hyperparameters.a_gamma, hyperparameters.b_gamma)
    gamma = q_gamma.sample()

    q_lambda = GammaDist(hyperparameters.a_lambda, hyperparameters.b_lambda)
    lambda_ = q_lambda.sample()

    q_tau = DirichletDist(hyperparameters.a_tau)
    tau = q_tau.sample()

    q_omega = DirichletDist(hyperparameters.a_omega)
    omega = q_omega.sample()

    q_pi_bar = BetaDist(numpy.ones(K), gamma * numpy.ones(K))
    pi_bar = q_pi_bar.sample()
    
    pi = numpy.empty_like(pi_bar)
    for k in range(K-1):
        pi[k] = pi_bar[k] * (1.-pi_bar[:k]).prod()
    pi[-1] = 1. - pi[:-1].sum()
    if pi[-1] < 0.: # adjust for numerical errors
        pi[-1] = 0.
    
    theta = numpy.random.dirichlet(alpha * pi, size=G)
    phi = numpy.empty((K+1, F))
    phi[0] = numpy.random.dirichlet(lambda_ * omega)
    phi[1:] = numpy.random.dirichlet(beta * tau, size=K)

    # sample the correct number of sites for each gene    
    sites = [None] * G
    for g, rho_g in enumerate(rho):
        v_g = [bernoulli(rho_i).rvs() for rho_i in rho_g]
        z_g = [discrete_sample(theta[g]) + 1 if v_gi else 0 for v_gi in v_g]
        x_g = [discrete_sample(phi[z_gi]) for z_gi in z_g]
        sites[g] = (v_g, z_g, x_g)
    
    result_type = namedtuple('Sample', 'alpha beta gamma lambda_ tau omega pi_bar pi theta phi sites')
    
    return result_type(
        alpha=alpha, 
        beta=beta, 
        gamma=gamma,
        lambda_=lambda_, 
        tau=tau, 
        omega=omega, 
        pi_bar=pi_bar, 
        pi=pi, 
        theta=theta,
        phi=phi,
        sites=sites,
    )
Example #5
    def sample(self, model, evidence):
        # Metropolis-Hastings (based on Kim, Shephard and Chib(1998))
        MAX_REJECT = 100000
        h, sigma_h = evidence['h'], evidence['sigma_h']
        current_phi = evidence['phi']
        alpha = model.hyper_params['alpha_phi']
        mu_h = model.known_params['mu_h']

        g = lambda phi: (alpha[0] - 1)*log((1 + phi)/2.) + (alpha[1] - 1)*log((1 - phi)/2.) - \
                        (h[0] - mu_h)**2*(1 - phi**2)/(2*sigma_h**2) + \
                        1/2.*log(1 - phi**2)

        for i in range(MAX_REJECT):
            proposal_mean = sum((h[1:] - mu_h)*(h[:-1] - mu_h))/sum((h[:-1] - mu_h)**2)
            proposal_var = sigma_h**2/sum((h[:-1] - mu_h)**2)
            try:
                proposed_phi = stats.norm(proposal_mean, sqrt(proposal_var)).rvs()
            except:
                pdb.set_trace()
            if -1 < proposed_phi < 1:
                accept_rv = stats.bernoulli(max(min(exp(g(proposed_phi) - g(current_phi)), 1), 0))
                if accept_rv.rvs():
                    self.update_acceptance_rate(1./(i + 1))
                    return proposed_phi

        raise Exception('Metropolis-Hastings rejected too many: %d'%MAX_REJECT)
def test_parameters_sampler_replacement():
    # raise error if n_iter too large
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params, n_iter=7)
    assert_raises(ValueError, list, sampler)
    # degenerates to GridSearchCV if n_iter the same as grid_size
    sampler = ParameterSampler(params, n_iter=6)
    samples = list(sampler)
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # test sampling without replacement in a large grid
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    sampler = ParameterSampler(params, n_iter=99, random_state=42)
    samples = list(sampler)
    assert_equal(len(samples), 99)
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    assert_equal(len(set(hashable_samples)), 99)

    # doesn't go into infinite loops
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params_distribution, n_iter=7)
    samples = list(sampler)
    assert_equal(len(samples), 7)
    def test_list_of_rvs(self):
        emissions = np.ones((3, 7))
        tmat = np.eye(3)
        durations = [
            bernoulli(0.1, loc=1),
            bernoulli(0.1, loc=3),
            bernoulli(0.1, loc=5),
        ]

        hsmm = HSMMModel(MultinomialEmissions(emissions), durations, tmat)

        discrete_durations = np.zeros((3, 100))
        discrete_durations[(0, 1, 2), (0, 2, 4)] = 0.9
        discrete_durations[(0, 1, 2), (1, 3, 5)] = 0.1

        np.testing.assert_array_almost_equal(
            hsmm._durations, discrete_durations
        )
def epsilon_greedy(self, epsilon=0.1):
    '''
    Pick a bandit uniformly at random with probability epsilon.
    Otherwise pick the bandit with the best observed proportion of wins.
    Return the index of the chosen bandit.
    '''
    if stats.bernoulli(epsilon).rvs() == 1:
        return random_choice(self)
    else:
        return max_mean(self)
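# For context, a self-contained sketch of the same epsilon-greedy rule outside
# the class; random_choice and max_mean above are assumed helpers, so here the
# explore/exploit branches are inlined with plain numpy.
import numpy as np
from scipy import stats

def epsilon_greedy_pick(successes, trials, epsilon=0.1):
    if stats.bernoulli(epsilon).rvs() == 1:
        return np.random.randint(len(trials))      # explore
    rates = successes / np.maximum(trials, 1)      # observed win rates
    return int(np.argmax(rates))                   # exploit

wins = np.array([3., 5., 1.])
pulls = np.array([10., 12., 4.])
arm = epsilon_greedy_pick(wins, pulls)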
Example #9
 def test_rvs(self):
     vals = stats.bernoulli.rvs(0.75, size=(2, 50))
     assert numpy.all(vals >= 0) & numpy.all(vals <= 1)
     assert numpy.shape(vals) == (2, 50)
     assert vals.dtype.char in typecodes["AllInteger"]
     val = stats.bernoulli.rvs(0.75)
     assert isinstance(val, int)
     val = stats.bernoulli(0.75).rvs(3)
     assert isinstance(val, numpy.ndarray)
     assert val.dtype.char in typecodes["AllInteger"]
Example #10
def Bernoulli(p, tag=None):
    """
    A Bernoulli random variate
    
    Parameters
    ----------
    p : scalar
        The probability of success
    """
    assert 0<p<1, 'Bernoulli probability "p" must be between zero and one, non-inclusive'
    return uv(ss.bernoulli(p), tag=tag)
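# Hedged usage sketch: `uv` (as in mcerp-style uncertainty libraries) appears
# to wrap a frozen scipy.stats distribution into an uncertain variate, so a
# call would look like the following (names outside this snippet are
# assumptions):
# coin = Bernoulli(0.4, tag='conversion')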
Example #11
def test_decode_options():
    """Test some other options for the decode function."""
    model = GaussianNB()
    mvpa._decode_subject(dataset, model)
    mvpa._decode_subject(dataset_3d, model)
    splits = stats.bernoulli(.5).rvs(24)
    mvpa._decode_subject(dataset, model, cv="sample")
    mvpa._decode_subject(dataset, model, cv=5)
    mvpa._decode_subject(dataset, model, split_pred=splits)
    mvpa._decode_subject(dataset, model, trialwise=True)
    mvpa._decode_subject(dataset, model, logits=True)
Example #12
	def proposal(self, proc):
		if self.is_family(proc, rd.distributions.bernoulli_gen):
			xp = rd.bernoulli(0.5).rvs()
			F = R = np.log(0.5)
		elif self.is_family(proc, rd.distributions.norm_gen) or self.is_family(proc, normal_func):
			rng = rd.norm(proc.val, np.sqrt(proc.var()))
			xp = rng.rvs()
			F = R = np.log(rng.pdf(xp))
		elif self.is_family(proc, gamma_func):
			rng = rd.norm(proc.val, np.sqrt(proc.var()))
			xp = rng.rvs()
			F = R = np.log(rng.pdf(xp))
		else:
			xp = proc.rvs()
			F = np.log(proc.pdf(xp))
			R = np.log(proc.pdf(proc.val))
		return (xp, F, R)
Example #13
def test_decode_shapes():
    """Test that we get expected shapes from decode function."""
    model = GaussianNB()
    accs = mvpa._decode_subject(dataset, model)
    assert_equal(accs.shape, (1,))
    accs = mvpa._decode_subject(dataset_3d, model)
    assert_equal(accs.shape, (4,))

    splits = stats.bernoulli(.5).rvs(24)
    accs = mvpa._decode_subject(dataset, model, split_pred=splits)
    assert_equal(accs.shape, (2,))
    accs = mvpa._decode_subject(dataset_3d, model, split_pred=splits)
    assert_equal(accs.shape, (4, 2))

    accs = mvpa._decode_subject(dataset, model, trialwise=True)
    assert_equal(accs.shape, (len(dataset["y"]),))
    accs = mvpa._decode_subject(dataset_3d, model, trialwise=True)
    assert_equal(accs.shape, (4, len(dataset["y"])))
Example #14
def test_randomize_classifier():
    """Test basic functions of randomize_classifier."""
    data = dict(X=spstats.norm(0, 1).rvs((100, 12)),
                y=spstats.bernoulli(.5).rvs(100),
                runs=np.repeat([0, 1], 50))
    model = GaussianNB()
    p_vals, perm_vals = stat.randomize_classifier(data, model,
                                                  return_dist=True)
    p_min, p_max = p_vals.min(), p_vals.max()
    perm_mean = perm_vals.mean()

    # Test that the p values are well behaved
    nose.tools.assert_greater_equal(1, p_max)
    nose.tools.assert_greater_equal(p_min, 0)

    # Test that the mean is close to chance (this is probabilistic)
    nose.tools.assert_greater(.1, np.abs(perm_mean - 0.5))

    # Test that the distribution looks normal (this is probabilistic)
    val, p = spstats.normaltest(perm_vals)
    nose.tools.assert_greater(p, 0.001)
Example #15
	def connectivityMatrixNew(self):

		print('Patterns created. N patterns:', self.p)
		patterns_pre=self.g(self.patterns_fr)
		patterns_post=self.f(self.patterns_fr)	
		#creating  connectivity with sparse matrices
		rv=bernoulli(1).rvs
		#connectivity=sparse.csr_matrix(sparse.random(self.N,self.N,density=self.c,data_rvs=rv))
		indexes=sparse.find(sparse.random(self.N,self.N,density=self.c,data_rvs=rv))
		print('Connectivity created. N patterns:', self.p)
		
		#finding the non zero entries
		#index_row=sparse.find(connectivity)[0]
		#index_col=sparse.find(connectivity)[1]
		
		# smart way to write down the outer product learning
		connectivity=(self.Amp/(self.c*self.N))*np.einsum('ij,ij->j',patterns_post[:,indexes[0]],patterns_pre[:,indexes[1]])
		connectivity=sparse.csr_matrix((connectivity,(indexes[0],indexes[1])),shape=(self.N,self.N))
		print('Connectivity loaded with patterns. N patterns:', self.p)

		self.connectivity=connectivity
Example #16
    def gen_data_anti(t=.3, a=2., v_pro=1., v_stop=1., v_anti=1.,
                      t_anti=1., p_stop=1.):
        from scipy.stats import bernoulli
        if t < 0 or a < 0 or v_pro < 0 or v_anti < 0 or t_anti < 0 or p_stop < 0 or p_stop > 1:
            return None

        func = likelihoods.fast_invgauss
        x_pro = copy(func(t, a, v_pro, accum=0))
        x_anti = func(t + t_anti, a, v_anti, accum=1)
        x_stop = func(t, a, v_stop, accum=2)
        if p_stop < 1:
            stop = bernoulli(p_stop).rvs(x_stop.shape)
            x_stop[np.logical_not(stop)] = np.inf

        x_pro[x_pro > x_stop] = np.inf

        idx = x_pro > x_anti
        x_pro[idx] = -x_anti[idx]
        data = x_pro

        return data
Example #17
from scipy import stats
stats.uniform(0, 1).rvs()
stats.bernoulli(0.6).rvs()

#plots
------------------------------------------
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')

from pandas.plotting import scatter_matrix
# diagonal='kde' replaces the diagonal histograms with KDE line plots
_ = scatter_matrix(concrete, alpha=0.2, figsize=(20, 20), diagonal='kde')

#changes pandas series to numpy array
y = grad['admit'].values
variable_names = ['gre', 'gpa', 'rank']

# generate jitter, one value per row of the dataframe: len(grad)
jitter = stats.uniform(-0.03,0.06).rvs(len(grad))
fig, axs = plt.subplots(1, 3, figsize = (14, 4))

# variable_names must contain exactly 3 names, because axs is a 1-row, 3-column grid
for variable, ax in zip(variable_names, axs.flatten()):
    ax.scatter(grad[variable], y + jitter, s=10, alpha=0.1)
    ax.text(0.1, 0.4, 'Ride the light!', fontsize = 35)
fig.tight_layout()
------------------------------------------
#provide random test data

#Random values in a given shape.
Example #18
# <nbformat>3.0</nbformat>

# <headingcell level=1>

# [Maximum Likelihood Estimation](http://nbviewer.ipython.org/github/rlabbe/Python-for-Signal-Processing/blob/master/Maximum_likelihood.ipynb)

# <codecell>

from __future__ import  division
from scipy.stats import bernoulli
import numpy as np

# <codecell>

p_true=1/2 # this is the value we will try to estimate from the observed data
fp=bernoulli(p_true)

def sample(n=10):
    'simulate coin flipping'
    return fp.rvs(n)  # flip it n times

xs = sample(100) # generate some samples

# <codecell>

import sympy
from sympy.abc import x, z
p=sympy.symbols('p',positive=True)
sympy.init_printing()

# <codecell>
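# The notebook is truncated here; a likely continuation (matching the full
# maximum-likelihood derivation that appears near the end of this page) builds
# the Bernoulli likelihood symbolically and solves for the MLE:
phi = p**x * (1 - p)**(1 - x)                  # Bernoulli pmf as a sympy expression
L = np.prod([phi.subs(x, i) for i in xs])      # likelihood of the observed flips
logL = sympy.expand_log(sympy.log(L))
sol, = sympy.solve(sympy.diff(logL, p), p)     # MLE == the sample mean of xs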
Example #19
 def _scipy(self, loc=0.0, scale=1.0):
     return ss.bernoulli(p=self.p, loc=loc)
Example #20
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 24 22:16:17 2014
scipy.stats provides bernoulli, a function that directly represents the
Bernoulli distribution. In fact, scipy.stats offers many common distributions.
@author: Administrator
"""

from scipy.stats import bernoulli
rv = bernoulli(0.8)
x = [-1, 0, 1, 2]
print(rv.cdf(x))
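# For comparison, the pmf at the same points: only 0 and 1 carry mass, so
# rv.pmf(x) is [0, 0.2, 0.8, 0] for the x above.
print(rv.pmf(x))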
Example #21
 def flip_coin(self):
     return int(scs.bernoulli(self.coin).rvs(1))
Example #22
def test_trialwise_split_exclusivity():
    """Test that we can't split predictions and get trialwise scores."""
    model = GaussianNB()
    splits = stats.bernoulli(.5).rvs(24)
    mvpa._decode_subject(dataset, model, split_pred=splits, trialwise=True)
Example #23
# coding:utf-8
from scipy.stats import bernoulli

import matplotlib.pyplot as plt

import numpy as np
import seaborn as sns

p = 0.0001
log = []
for i in range(100):
    print(i)
    tmp = bernoulli(p).rvs(size=10 ** 6)
    log += list(np.diff(np.where(tmp == 1)[0]))
print(len(log))
# print np.mean(log)
# print np.var(log)

sns.distplot(log)
plt.show()
Example #24
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as scs

## declare variables
font_size = 11
font_name = 'sans-serif'
n = 10000
fig = plt.figure(figsize=(10,6))
splot = 0

## loop through parameterizations of the bernoulli
for p in [0.3,0.6,0.9]:
    splot += 1
    ax = fig.add_subplot(1,3,splot)
    
    x = np.arange(scs.bernoulli.ppf(0.01, p),scs.bernoulli.ppf(0.99, p)+1)
    ax.plot(x, scs.bernoulli.pmf(x, p), 'bo', ms=8, label='pmf')
    ax.vlines(x, 0, scs.bernoulli.pmf(x, p), colors='b', lw=5, alpha=0.5)
    rv = scs.bernoulli(p)
    
    ax.set_ylim((0,1.0))
    ax.set_xlim((-0.25, 1.25))
    ax.set_title("p=%s"%(p))
    ax.set_aspect(1./ax.get_data_ratio())

    for t in ax.get_xticklabels():
        t.set_fontsize(font_size-1)
        t.set_fontname(font_name)
    for t in ax.get_yticklabels():
        t.set_fontsize(font_size-1)
        t.set_fontname(font_name)

plt.savefig("bernoulli-distn.png", dpi=400)
plt.show()
Example #25
import numpy as np
import scipy.stats as spstats
import nose.tools
import nose.tools as nt
from nose.tools import assert_equal, assert_almost_equal, raises
import pandas.util.testing as pdt

from .. import statistical as stat

rs = np.random.RandomState(sum(map(ord, "moss_stats")))

a_norm = rs.randn(100)

a_range = np.arange(101)

datasets = [
    dict(X=spstats.norm(0, 1).rvs((24, 12)),
         y=spstats.bernoulli(.5).rvs(24),
         runs=np.repeat([0, 1], 12)) for i in range(3)
]

datasets_3d = [
    dict(X=spstats.norm(0, 1).rvs((4, 24, 12)),
         y=spstats.bernoulli(.5).rvs(24),
         runs=np.repeat([0, 1], 12)) for i in range(3)
]


def test_bootstrap():
    """Test that bootstrapping gives the right answer in dumb cases."""
    a_ones = np.ones(10)
    n_boot = 5
    out1 = stat.bootstrap(a_ones, n_boot=n_boot)
Example #26
beta = 1
p = 0.75

width = 0.25
lower = -3
upper = 3
numBins = int((upper - lower) / (width)) + 1

interval = np.array(
    [-10**10, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 10**10])
standard = np.zeros(len(interval) - 1)

relative_freq = np.zeros(len(interval) - 1)

for n in [5, 15, 50]:
    Zs = [uniform(-1, 1, n, N), gamma(alpha, beta, n, N), bernoulli(p, n, N)]
    Z_names = ["Uniform", "Gamma", "Bernoulli"]

    for Z, name in zip(Zs, Z_names):

        for k in range(int(len(interval)) - 1):
            standard[k] = norm.cdf(interval[k + 1]) - norm.cdf(interval[k])
            relative_freq[k] = (np.sum(Z < interval[k + 1]) -
                                np.sum(Z < interval[k])) / float(len(Z))

        percentiles = norm.ppf(
            np.linspace(0.5 / float(N), (N - 0.5) / float(N), int(N)))
        sorted_Zs = np.sort(Z)

        print "Data for %s Distribution, n = %g:" % (name, n)
        print "-----------------------------"
Example #27
    jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_normal(mean=mean, cov=cov)


def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
    jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_normal(mean=mean, cov=cov)


_DIST_MAP = {
    dist.BernoulliProbs:
    lambda probs: osp.bernoulli(p=probs),
    dist.BernoulliLogits:
    lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
    dist.Beta:
    lambda con1, con0: osp.beta(con1, con0),
    dist.BinomialProbs:
    lambda probs, total_count: osp.binom(n=total_count, p=probs),
    dist.BinomialLogits:
    lambda logits, total_count: osp.binom(n=total_count,
                                          p=_to_probs_bernoulli(logits)),
    dist.Cauchy:
    lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
    dist.Chi2:
    lambda df: osp.chi2(df),
    dist.Dirichlet:
    lambda conc: osp.dirichlet(conc),
Example #28
 def __init__(self, theta):
     self._bernoulli = bernoulli(1 - theta)
Example #29
 def __init__(self, parameters, *args, **kwargs):
     self.probability_chronic = parameters.probability_chronic
     distn = bernoulli(self.probability_chronic)
     super()._copyattrs(distn)
Example #30
def calc_likelihood(x, mu):
    return bernoulli(mu).pmf(x)
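# Hedged usage sketch, assuming the scipy.stats bernoulli import used by the
# function above: for i.i.d. observations, the likelihood of a whole sample is
# the product of the per-point pmf values.
import numpy as np
x = np.array([1, 0, 1, 1])
print(np.prod(calc_likelihood(x, 0.7)))   # 0.7**3 * 0.3 ~= 0.103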
Example #31
 def __init__(self, p):
     self.bernoulli = stats.bernoulli(p)
     self.mean = self.bernoulli.mean()  # = p
Example #32
def generate_data(N_A,
                  N_B,
                  p_A,
                  p_B,
                  days=None,
                  control_label='A',
                  test_label='B'):
    """Returns a pandas dataframe with fake CTR data
    Example:
    Parameters:
        N_A (int): sample size for control group
        N_B (int): sample size for test group
            Note: final sample size may not match N_A provided because the
            group at each row is chosen at random (50/50).
        p_A (float): conversion rate of the control group
        p_B (float): lift in conversion rate for the test group (it is
            added to p_A inside the function)
        days (int): optional; if provided, a column for 'ts' will be included
            to divide the data in chunks of time
            Note: overflow data will be included in an extra day
        control_label (str)
        test_label (str)
    Returns:
        df (df)
    """

    # initiate empty container
    data = []

    # total amount of rows in the data
    N = N_A + N_B

    p_B = p_B + p_A

    group_bern = scs.bernoulli(0.5)

    # initiate bernoulli distributions to randomly sample from
    A_bern = scs.bernoulli(p_A)
    B_bern = scs.bernoulli(p_B)

    for idx in range(N):
        # initiate empty row
        row = {}
        # for 'ts' column
        if days is not None:
            if type(days) == int:
                row['ts'] = idx // (N // days)
            else:
                raise ValueError("Provide an integer for the days parameter.")
        # assign group based on 50/50 probability
        row['group'] = group_bern.rvs()

        if row['group'] == 0:
            # assign conversion based on provided parameters
            row['converted'] = A_bern.rvs()
        else:
            row['converted'] = B_bern.rvs()
        # collect row into data container
        data.append(row)

    # convert data into pandas dataframe
    df = pd.DataFrame(data)

    # transform group labels of 0s and 1s to user-defined group labels
    df['group'] = df['group'].apply(lambda x: control_label
                                    if x == 0 else test_label)

    return df
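# Hedged usage sketch with hypothetical parameters: ~1000 users per group, a
# 10% control rate and a +2% lift for the test group (p_B is a lift; see the
# docstring note above), spread over 7 days.
df = generate_data(N_A=1000, N_B=1000, p_A=0.10, p_B=0.02, days=7)
print(df.groupby('group')['converted'].mean())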
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
from scipy.stats import bernoulli

# Config
os.chdir("/home/jovyan/work")
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
plt.rcParams["figure.figsize"] = (10, 5)
np.random.seed(42)

# Prepare the data
y = bernoulli(0.22).rvs(10)
print(y)
chains = 1000 * 2**np.arange(6)
cols = ['mean', 'sd', 'mc_error', 'hpd_2.5', 'hpd_97.5', 'n_eff', 'Rhat']
df_summaries = pd.DataFrame(columns=cols)

# Inference
for chain in chains:
    with pm.Model() as model:
        theta = pm.Beta("theta", alpha=1, beta=1)
        throw = pm.Bernoulli("throw", theta, observed=y)
        trace = pm.sample(chain, step=pm.Metropolis(), progressbar=False)
        df_summaries = df_summaries.append(pm.summary(trace))

# Calculate the HPD interval range
df_summaries["hpd"] = df_summaries["hpd_97.5"] - df_summaries["hpd_2.5"]
Example #34
 def flip_coin(self):
     return bernoulli(self.likelihood[self.coin]).rvs(1)[0]
 def createBernoulliArmsFromMeans(self, means):
     # Creates an instance of a bandit problem with a Bernoulli distribution on each arm, using the given per-arm means.
     self.bestarm = np.argmax(means)
     for a in range(0, self.A):
         self.armDistributions.append(ss.bernoulli(means[a]))
         self.armMeans.append(means[a])
Example #36
 def sample(self, x, random_state=None):
     p = self.mean(x)
     return st.bernoulli(p).rvs(random_state=random_state)
Example #37
    # draw initial sample from uniform
    x_t = (uniform().rvs(size=3)*256).astype(int)

    for numQ in range(120):
        # draw sample from proposal distribution, 
        x_proposal = multivariate_normal(x_t,cov_prop).rvs().astype(int)
        
        # resample from proposal distribution if out-of-bounds
        while any(xi<0 or xi>255 for xi in x_proposal):
            x_proposal = multivariate_normal(x_t,cov_prop).rvs().astype(int)
            
        # accept with probability A(x_proposal,x_t)
        A = targetdist.pdf(x_proposal)/(targetdist.pdf(x_proposal)+targetdist.pdf(x_t))
        
        # accept proposal (XOR with an error draw flips the decision with probability error_rate)
        if bernoulli(A).rvs() ^ bernoulli(error_rate).rvs():
            # new MCMC state
            x_t = x_proposal
            # start sampling after 30th proposal
            if numQ>burn_in_time:
                samples.append(x_proposal)
            

# MCMC samples and color components
samples = np.array(samples)
R,G,B = samples.T

# sample statistics on individual component distributions
print "Mean: ({0:.2f},{1:.2f},{2:.2f})".format(np.mean(R),np.mean(G),np.mean(B))
print "Std: ({0:.2f},{1:.2f},{2:.2f})".format(np.std(R),np.std(G),np.std(B))
Example #38
 def __init__(self, theta):
     self._bernoulli = bernoulli(0.1)
     self._theta = theta
Example #39
import matplotlib.pyplot as plt
from scipy import stats
import csv

# Store the data in a list
with open('bits10k.csv', newline='') as csvfile:
  data = list(csv.reader(csvfile))

bits = [0]*len(data)
for c in range(0,10000):
  bits[c]=int(data[c][0])

# Number of bits
N = len(bits)

# Equiprobable binary random variable
X = stats.bernoulli(0.5)

# Generate bits to "transmit"
#bits = X.rvs(N)

'''
Part 1

'''

# Operating frequency
f = 5000 # Hz

# Duration of each symbol period (waveform)
T = 1/f # 0.2 ms
Example #40
import scipy.stats as sts

mu = 2.0  # assumed; the original snippet starts mid-script without mu defined
sigma = 0.5
norm_rv = sts.norm(loc=mu, scale=sigma)
x = norm_rv.rvs(size=4)  # [2.42471807,  2.89001427,  1.5406754 ,  2.218372]

# Generate uniform distribution

a = 1
b = 4
uniform_rv = sts.uniform(a, b - a)
x = uniform_rv.rvs(
    size=4)  # [2.90068986,  1.30900927,  2.61667386,  1.82853085]

# Generate Bernoulli distribution

p = 0.7
bernoulli_rv = sts.bernoulli(p)
x = bernoulli_rv.rvs(size=4)  # [1, 1, 1, 0]

# Generate binomial distribution

n = 20
p = 0.7
binom_rv = sts.binom(n, p)
x = binom_rv.rvs(size=4)  # [13, 15, 13, 14]

# Generate Poisson distribution

lam = 5
poisson_rv = sts.poisson(lam)
x = poisson_rv.rvs(size=4)  # [6, 10,  4,  4]
Example #41
    x_t = (uniform().rvs(size=3) * 256).astype(int)

    for numQ in range(120):
        # draw sample from proposal distribution,
        x_proposal = multivariate_normal(x_t, cov_prop).rvs().astype(int)

        # resample from proposal distribution if out-of-bounds
        while any(xi < 0 or xi > 255 for xi in x_proposal):
            x_proposal = multivariate_normal(x_t, cov_prop).rvs().astype(int)

        # accept with probability A(x_proposal,x_t)
        A = targetdist.pdf(x_proposal) / (targetdist.pdf(x_proposal) +
                                          targetdist.pdf(x_t))

        # accept proposal (XOR with an error draw flips the decision with probability error_rate)
        if bernoulli(A).rvs() ^ bernoulli(error_rate).rvs():
            # new MCMC state
            x_t = x_proposal
            # start sampling after 30th proposal
            if numQ > burn_in_time:
                samples.append(x_proposal)

# MCMC samples and color components
samples = np.array(samples)
R, G, B = samples.T

# sample statistics on individual component distributions
print "Mean: ({0:.2f},{1:.2f},{2:.2f})".format(np.mean(R), np.mean(G),
                                               np.mean(B))
print "Std: ({0:.2f},{1:.2f},{2:.2f})".format(np.std(R), np.std(G), np.std(B))
Example #42
def make_sample(M=1000):
    log = np.sum(bernoulli(p).rvs(size=[N, M]), axis=0)
    return log
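# Note: summing N independent Bernoulli(p) draws per column gives Binomial(N, p)
# samples, so an equivalent one-liner (with the module-level N and p assumed
# above) would be:
# log = binom(N, p).rvs(size=M)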
Example #43
    # Use a zero input and the fwd transform to get the shape of
    # the pyramid easily
    f_tf = Transform2d(biort=biort, qshift=qshift)
    p_tf = f_tf.forward(im, nlevels=4, include_scale=True)

    # Create ops for the inverse transform
    X_tf = f_tf.inverse(p_tf)

    np.testing.assert_array_almost_equal(
        X_np, X_tf, decimal=PRECISION_DECIMAL)


@skip_if_no_tf
@pytest.mark.parametrize("biort,qshift,gain_mask", [
    ('antonini','qshift_c',stats.bernoulli(0.8).rvs(size=(6,4))),
    ('near_sym_a','qshift_a',stats.bernoulli(0.8).rvs(size=(6,4))),
    ('legall','qshift_c',stats.bernoulli(0.8).rvs(size=(6,4))),
    ('near_sym_b','qshift_06',stats.bernoulli(0.8).rvs(size=(6,4))),
    ('near_sym_b_bp', 'qshift_b_bp',stats.bernoulli(0.8).rvs(size=(6,4)))
])
def test_results_match_invmask(biort,qshift,gain_mask):
    im = mandrill

    f_np = Transform2d_np(biort=biort, qshift=qshift)
    p_np = f_np.forward(im, nlevels=4, include_scale=True)
    X_np = f_np.inverse(p_np, gain_mask)

    f_tf = Transform2d(biort=biort, qshift=qshift)
    p_tf = f_tf.forward(im, nlevels=4, include_scale=True)
    X_tf = f_tf.inverse(p_tf, gain_mask)
Example #44
 def __init__(self, mu1, K1, p, mu2, K2):
     assert (p > 0 and p < 1)
     self.p = stats.bernoulli(p)
     self.d1 = mvnorm(mu1, K1)
     self.d2 = mvnorm(mu2, K2)
Example #45
def generate(prob, nRow, nCol):
    dist = stats.bernoulli(prob)
    matrix = dist.rvs((nRow + 2, nCol))
    matrix[0, :] = 1
    matrix[nRow + 1, :] = 1
    return matrix
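# Hedged usage sketch, assuming the scipy.stats import used above: a 5x4
# random 0/1 grid with density 0.3, padded with all-ones rows at the top and
# bottom.
m = generate(0.3, 5, 4)
print(m.shape)      # (7, 4)
print(m[0], m[-1])  # both rows are all ones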
Example #46
def test_estimate_pauli_sum(qvm):
    """
    Full test of the estimation procedures
    """
    # type checks
    with pytest.raises(TypeError):
        estimate_pauli_sum('5', {0: 'X', 1: 'Z'}, Program(), 1.0E-3, qvm)

    with pytest.raises(CommutationError):
        estimate_pauli_sum([sX(0), sY(0)], {
            0: 'X',
            1: 'Z'
        }, Program(), 1.0E-3, qvm)

    with pytest.raises(TypeError):
        estimate_pauli_sum(sX(0), {0: 'X', 1: 'Z'}, Program(), 1.0E-3, qvm)

    # mock out qvm
    np.random.seed(87655678)
    brv1 = bernoulli(p=0.25)
    brv2 = bernoulli(p=0.4)
    n = 500
    two_qubit_measurements = list(zip(brv1.rvs(size=n), brv2.rvs(size=n)))
    pauli_terms = [sZ(0), sZ(1), sZ(0) * sZ(1)]

    fakeQVM = Mock(spec=QVMConnection())
    fakeQVM.run = Mock(return_value=two_qubit_measurements)
    mean, means, cov, estimator_var, shots = estimate_pauli_sum(
        pauli_terms, {
            0: 'Z',
            1: 'Z'
        },
        Program(),
        1.0E-1,
        fakeQVM,
        symmetrize=False)
    parity_results = np.zeros((len(pauli_terms), n))
    parity_results[0, :] = [-2 * x[0] + 1 for x in two_qubit_measurements]
    parity_results[1, :] = [-2 * x[1] + 1 for x in two_qubit_measurements]
    parity_results[2, :] = [
        -2 * (sum(x) % 2) + 1 for x in two_qubit_measurements
    ]

    assert np.allclose(np.cov(parity_results, ddof=1), cov)
    assert np.isclose(np.sum(np.mean(parity_results, axis=1)), mean)
    assert np.allclose(np.mean(parity_results, axis=1), means)
    assert np.isclose(shots, n)
    variance_to_beat = np.sum(cov) / (n - 1)
    assert np.isclose(variance_to_beat, estimator_var)

    # Double the shots by ever so slightly decreasing variance bound
    double_two_q_measurements = two_qubit_measurements + two_qubit_measurements
    mean, means, cov, estimator_var, shots = estimate_pauli_sum(
        pauli_terms, {
            0: 'Z',
            1: 'Z'
        },
        Program(),
        variance_to_beat - 1.0E-8,
        fakeQVM,
        symmetrize=False)

    parity_results = np.zeros((len(pauli_terms), 2 * n))
    parity_results[0, :] = [-2 * x[0] + 1 for x in double_two_q_measurements]
    parity_results[1, :] = [-2 * x[1] + 1 for x in double_two_q_measurements]
    parity_results[2, :] = [
        -2 * (sum(x) % 2) + 1 for x in double_two_q_measurements
    ]

    assert np.allclose(np.cov(parity_results, ddof=1), cov)
    assert np.isclose(np.sum(np.mean(parity_results, axis=1)), mean)
    assert np.allclose(np.mean(parity_results, axis=1), means)
    assert np.isclose(shots, 2 * n)
    assert np.isclose(np.sum(cov) / (2 * n - 1), estimator_var)
Example #47
 def __init__(self, p):
     self.p = p
     self.noise_distr = bernoulli(p)
Example #48
def testBernoulli():
    from scipy.stats import bernoulli
    #bernoulli random variable
    brv=bernoulli(p=0.3)
    sample = brv.rvs(size=20)
    print(sample)
Example #49
def getBanditArms(numArms):
    armMeans = np.random.rand(numArms)
    banditArms = [bernoulli(armMeans[i]) for i in range(numArms)]
    return banditArms
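# Hedged usage sketch, assuming numpy and scipy.stats.bernoulli are imported
# as in the function above: create three arms and pull each once.
arms = getBanditArms(3)
rewards = [arm.rvs() for arm in arms]   # each reward is 0 or 1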
Example #50
    def sample(self, x, random_state=None):
        import scipy.stats as st

        p = self.mean(x)
        return _aca(st.bernoulli(p).rvs(random_state=random_state))
Example #51
    def __new__(cls, jax_dist, *params):
        sp_dist = None
        if jax_dist in _DIST_MAP:
            sp_dist = _DIST_MAP[jax_dist]
        return super(cls, T).__new__(cls, jax_dist, sp_dist, params)


def _mvn_to_scipy(loc, cov, prec, tril):
    jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
    mean = jax_dist.mean
    cov = jax_dist.covariance_matrix
    return osp.multivariate_normal(mean=mean, cov=cov)


_DIST_MAP = {
    dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
    dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
    dist.Beta: lambda con1, con0: osp.beta(con1, con0),
    dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
    dist.BinomialLogits: lambda logits, total_count: osp.binom(n=total_count, p=_to_probs_bernoulli(logits)),
    dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
    dist.Chi2: lambda df: osp.chi2(df),
    dist.Dirichlet: lambda conc: osp.dirichlet(conc),
    dist.Exponential: lambda rate: osp.expon(scale=np.reciprocal(rate)),
    dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1./rate),
    dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
    dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
    dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=np.exp(loc)),
    dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(n=total_count, p=probs),
    dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(n=total_count,
                                                                        p=_to_probs_multinom(logits)),
Example #52
### Probability distributions

## Bernoulli distribution

import numpy as np
from scipy import stats

p = 0.5
bernoulliDist = stats.bernoulli(p)

p_tails = bernoulliDist.pmf(0) 
p_heads = bernoulliDist.pmf(1)

print(p_tails, p_heads)

# Generating random samples

trials = bernoulliDist.rvs(10)
print(trials)

## Binomial distribution

(p, num) = (0.5, 4)
binomDist = stats.binom(num, p)

print(binomDist.pmf(np.arange(5)))

## Normal distribution

from scipy.stats import norm

# Generating the distribution
Example #53
                                      KFold)

from numpy.testing import assert_array_equal, assert_array_almost_equal
import numpy.testing as npt
import nose.tools
from nose.tools import assert_equal, raises

from .. import mvpa

evs = [np.array([[6, 0, 1],
                 [18, 0, 1]]),
       np.array([[12, 0, 1],
                 [24, 0, 1]])]

dataset = dict(X=stats.norm(0, 1).rvs((24, 12)),
               y=stats.bernoulli(.5).rvs(24),
               runs=np.repeat([0, 1], 12))

dataset_3d = dict(X=stats.norm(0, 1).rvs((4, 24, 12)),
                  y=stats.bernoulli(.5).rvs(24),
                  runs=np.repeat([0, 1], 12))


def test_extract_dataset():
    """Test simple case."""
    evs = pd.DataFrame(dict(onset=[1, 2, 3],
                            condition=["foo", "foo", "bar"]),
                       dtype=float)
    ts = np.random.randn(5, 5, 5, 4)
    mask = ts[..., 0] > .5
    X, y, m = mvpa.extract_dataset(evs, ts, mask, 1)
Example #54
plt.ylabel('$F(x)$')
plt.xlabel('$x$')

x = np.linspace(0, 5, 1000)
pdf = uniform_rv.pdf(x)
plt.plot(x, pdf)

plt.ylabel('$f(x)$')
plt.xlabel('$x$')

# Bernoulli distribution

# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bernoulli.html

bernoulli_rv = sts.bernoulli(0.7)

b = bernoulli_rv.rvs(300)

print(abs(np.sum(b) - 300 * 0.7) / 300)

# binomial distribution

# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.binom.html

binomial_rv = sts.binom(20, 0.9)

x = np.linspace(0, 20, 21)
cdf = binomial_rv.cdf(x)
plt.step(x, cdf)
Example #55
    rearr = lambda arr: np.moveaxis(arr, [0, 1, 2, 3, 4], [0, 4, 1, 2, 3])
    rearry = lambda arr: np.moveaxis(arr, [0, 1, 2, 3, 4], [0, 4, 1, 2, 3]
                                     )[:, :, :, :, :1]
    X = rearr(x_test)
    Y = rearry(y_test)

    return model.keras_model.evaluate(X, Y, batch_size=batch_size)[1]


DEFAULT_DIST = {
    'n_spheres_dist': norm(loc=120, scale=5),
    'n_cubes_dist': norm(loc=0, scale=0),
    'n_cylinders_dist': norm(loc=18, scale=5),
    'psf_lateral_sigma_dist': uniform(loc=0, scale=1),
    'psf_axial_sigma_dist': uniform(loc=0.5, scale=4),
    'use_poisson_dist': bernoulli(0.75),
    'subsample_factor_dist': uniform(loc=0.5, scale=0.5),
    'gauss_noise_sigma_dist': uniform(loc=0, scale=0.42)
}


def gen_model_eval_data(params: Dict[str, Union[List[float], np.ndarray]],
                        shape: int,
                        use_noise: bool,
                        use_psf: bool,
                        use_subsampling: bool,
                        samples_per_param: int = 18,
                        dists=DEFAULT_DIST):
    """
    Analyze how the performance of a CARE model is affected by different parameters of degradation and image content
Example #56
#!/usr/bin/env python
"""stats_bernoulli.py: Demonstrate how ``scipy.stats.bernoulli`` works.
"""
from scipy.stats import bernoulli

# pylint: disable=invalid-name

# Let p be the probability of the coin landing heads.
p = 0.5
rv = bernoulli(p)

# Compute moments.
mean, var, skew, kurt = rv.stats(moments='mvsk')

# mean == p.
print(f"mean: {float(mean):.3f}")

# median == 0.5 (if p == 0.5)
print(f"median: {rv.median():.3f}")

# var == p * (1 - p)
print(f"var: {float(var):.3f}")

# skew == (1 - 2 * p)/np.sqrt(p * (1 - p))
print(f"skew: {float(skew):.3f}")

# kurt == -2 (if p == 0.5)
print(f"kurt: {float(kurt):.3f}")

# std == np.sqrt(var)
print(f"std: {rv.std():.3f}")
Example #57
import numpy as np
from scipy.stats import bernoulli, binom

np.set_printoptions(formatter={'all':lambda x: '%.3f' % x})

# https://people.duke.edu/~ccc14/sta-663/EMAlgorithm.html

##################### complete information #######################################
#  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.binom.html
def neg_loglik(thetas, n, xs, zs):
    return -np.sum([binom(n, thetas[z]).logpmf(x) for (x, z) in zip(xs, zs)])

m = 10
theta_A = 0.8
theta_B = 0.3
theta_0 = [theta_A, theta_B]

#  https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.bernoulli.html
coin_A = bernoulli(theta_A)
coin_B = bernoulli(theta_B)

# A,A,B,A,B.
xs = list(map(sum, [coin_A.rvs(m), coin_A.rvs(m), coin_B.rvs(m), coin_A.rvs(m), coin_B.rvs(m)]))
zs = [0, 0, 1, 0, 1]


xs = np.array(xs)
print('xs =', xs)
ml_A = np.sum(xs[[0,1,3]])/(3.0*m)
ml_B = np.sum(xs[[2,4]])/(2.0*m)
print('ml_A, ml_B:', ml_A, ml_B)

bnds = [(0,1), (0,1)]
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
Example #58
#!/usr/bin/python3

# import libraries
import pymc3 as pm
from scipy import stats
import matplotlib.pyplot as plt

# this is PyMC3 notation
# essentially, model initialisation in PyMC3 happens inside the model-fitting
# portion of the code; there is no separate model-building step as in PyMC2

with pm.Model() as trafficLight_model:

    # define our data - PyMC3 does not like map objects
    data = stats.bernoulli(0.2).rvs(100000)

    # similar as in PyMC2
    theta = pm.Beta("theta", alpha=1.0, beta=1.0)
    # "observed" replaces "value"
    color = pm.Bernoulli("color", p=theta, observed=data)

    # define iteration start
    start = pm.find_MAP()

    # MCMC in PyMC3
    step = pm.Metropolis()
    trace = pm.sample(int(1e4), start=start, step=step, model=trafficLight_model)

# show our amazing results
pm.traceplot(trace[0:])
plt.show()
Example #59
def test(n=100, p=0.7):
    '''
    Bernoulli distribution

    n - sample size
    p - probability of drawing a one

    Returns a list of the form [generated sample, list x, list y, mean, mode, median,
    range, deviation, variance, standard deviation, variation, skewness, kurtosis]
    '''

    distribution = st.bernoulli(p)
    sample = list(distribution.rvs(size=n))

    for i in range(len(sample)):
        sample[i] = round(sample[i], 2)

    var = list(sample)
    var.sort()
    x = list(set(sample))
    y = list()
    x.sort()

    freq_table = dict()

    for num in x:
        freq_table[num] = sample.count(num)

    int_len = ((max(sample) - min(sample)) / r(sample))
    int_bounds = list()
    next = min(sample)

    for i in range(r(sample)):
        int_bounds.append(round(next, 2))
        next += int_len

    int_bounds.append(max(sample))

    freq_table = dict()
    int_list = list()

    for i in range(len(int_bounds) - 1):
        int_list.append([int_bounds[i], int_bounds[i + 1]])

    for i in range(len(int_list)):
        if i != len(int_list) - 1:
            freq_table["[" + str(int_list[i][0]) + "; " + str(int_list[i][1]) +
                       ")"] = 0
        else:
            freq_table["[" + str(int_list[i][0]) + "; " + str(int_list[i][1]) +
                       "]"] = 0

    for i in range(len(sample)):
        for j in range(len(int_list)):
            if sample[i] >= int_list[j][0] and sample[i] < int_list[j][
                    1] and j != len(int_list) - 1:
                freq_table["[" + str(int_list[j][0]) + "; " +
                           str(int_list[j][1]) + ")"] += 1

            elif sample[i] >= int_list[j][0] and sample[i] <= int_list[j][
                    1] and j == len(int_list) - 1:
                freq_table["[" + str(int_list[j][0]) + "; " +
                           str(int_list[j][1]) + "]"] += 1

    int_list_values = list()
    for key in freq_table:
        int_list_values.append(int(freq_table[key]))

    intr = list(freq_table.keys())

    centered_int = list()
    for intr in int_list:
        centered_int.append(round(((intr[0] + intr[1]) / 2), 3))

    freq_table_disc = dict()
    x = list(set(sample))
    for num in x:
        freq_table_disc[num] = sample.count(num)

    result = list()
    result.append(sample)

    x = [0, 1]
    y = [1 - p, p]

    result.append(x)
    result.append(y)

    mean = np.mean(sample)
    result.append(mean)

    moda = list(mode(freq_table_disc).keys())
    result.append(moda)

    med = statistics.median(sample)
    result.append(med)

    ro = max(sample) - min(sample)
    result.append(ro)

    deviation = dev(freq_table_disc)
    result.append(deviation)

    variansa = dev(freq_table_disc) / (len(sample) - 1)
    result.append(variansa)

    standart = math.sqrt(variansa)
    result.append(standart)

    variation = standart / np.mean(sample)
    result.append(variation)

    asym = st.skew(sample)
    result.append(asym)

    ex = st.kurtosis(sample)
    result.append(ex)

    return result
# -*- coding: utf-8 -*-
"""
Created on Sun Dec  8 11:41:19 2019

@author: Yao
"""

from scipy.stats import bernoulli

p = 1.0/2 # the target value we want to estimate
sample = bernoulli(p)
xs = sample.rvs(100) # generate 100 samples
print(xs[:10]) # inspect the first 10 samples

import sympy
import numpy as np
x, p, z = sympy.symbols('x p z', positive = True)
phi = p ** x * (1 - p) ** (1 - x)
L = np.prod([phi.subs(x, i) for i in xs])
print(L)

logL = sympy.expand_log(sympy.log(L))
print(logL)
sol, = sympy.solve(sympy.diff(logL, p), p) # solve for the MLE
print(sol)
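# Sanity check: the closed-form Bernoulli MLE is the sample mean, so the `sol`
# printed above should equal:
print(sum(xs) / len(xs))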