Example No. 1
def randomly_draw_parameters(state_remain_mean=.99, state_remain_sd=.03,
                   normal_emission_mean=.95, normal_emission_sd=.05,
                   event_emission_mean=.6, event_emission_sd=.05):
    alpha, beta = beta_method_of_moments(state_remain_mean, state_remain_sd)
    a1 = random.beta(alpha, beta)
    a2 = random.beta(alpha, beta)
    trans_matrix = array(([[a1, 1-a1],
                            [1-a2, a2]]))
    
    # The initial state is the steady state of the transition matrix
    # which is computed via the leading eigenvector
    w, v = sorted_eig(transpose(trans_matrix))
    initial_state = v[:,0]
    initial_state /= sum(initial_state)

    alpha, beta = beta_method_of_moments(normal_emission_mean, normal_emission_sd)
    a1 = random.beta(alpha, beta)
    
    alpha, beta = beta_method_of_moments(event_emission_mean, event_emission_sd)
    a2 = random.beta(alpha, beta)


    # Emission matrix - state 0 is likely to emit symbol 0, and vice versa
    # In other words, events are likely to be outliers
    emission_matrix = array(([[a1, 1-a1],
                            [1-a2, a2]]))
    
    return initial_state, trans_matrix, emission_matrix
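
beta_method_of_moments and sorted_eig are project helpers not shown here. A minimal sketch of the moment-matching step, assuming it follows the standard Beta mean/variance formulas (an illustration, not the project's actual code):

import numpy as np

def beta_method_of_moments(mean, sd):
    # Solve mean = a/(a+b) and var = mean*(1-mean)/(a+b+1) for (a, b).
    common = mean * (1 - mean) / sd**2 - 1  # this is a+b
    return mean * common, (1 - mean) * common

a, b = beta_method_of_moments(0.99, 0.03)
draws = np.random.beta(a, b, size=100000)
print(draws.mean(), draws.std())  # should land near 0.99 and 0.03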
Example No. 2
def montecarlo(cause, effect, unknown, n, *ignore):
    cnt_cause = count(zip(cause))
    cnt_unknown = count(zip(unknown))
    cnt_cause_effect = count(zip(cause, effect))
    cnt_effect_unknown = count(zip(effect, unknown))
    sumarr(cnt_cause_effect, 0.1) # make beta dist work with zeros
    sumarr(cnt_cause, 0.1)
    sumarr(cnt_unknown, 0.1)
    sumarr(cnt_effect_unknown, 0.1)
    cnt_cause_unknown = count(zip(cause, unknown))
    rounds = 500
    p_overall = struct(cause_unknown_chain=[[0,0],[0,0]],
                       cause_unknown_collide=[[0,0],[0,0]])
    for i in range(rounds):
        p=struct()
        p.cause = 1-beta(*cnt_cause)
        p.unknown = 1-beta(*cnt_unknown)
        p.effect_given_cause = [1-beta(*cnts) for cnts in cnt_cause_effect]
        p.unknown_given_effect = [1-beta(*cnts) for cnts in cnt_effect_unknown]
        p = get_joints_by_model(p)
        acclarr(p_overall.cause_unknown_chain, p.cause_unknown_chain)
        acclarr(p_overall.cause_unknown_collide, p.cause_unknown_collide)
    mularr(p_overall.cause_unknown_chain, 1.0/rounds)
    mularr(p_overall.cause_unknown_collide, 1.0/rounds)
    try:
        bayes_factor = get_factor(p_overall, cnt_cause_unknown)
    except ValueError:
        print('==ValueError==')
        print(p_overall.__dict__)
        raise ValueError()
    return struct(bayes_fwd_rev=bayes_factor)
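
montecarlo above leans on project helpers (struct, count, sumarr, acclarr, mularr, get_joints_by_model, get_factor) that are not shown. As a rough illustration only: struct behaves like a mutable attribute bag, and a minimal stand-in (an assumption, not the original implementation) is:

class struct(object):
    """Minimal attribute-bag stand-in for the project's struct helper."""
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

p = struct(cause_unknown_chain=[[0, 0], [0, 0]])
p.cause = 0.5  # attributes can be added freely, as montecarlo() does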
Example No. 3
 def operations(self,tracenode):
     self.params['IOPReduction'] = random.beta(tracenode.value[0][0],tracenode.value[0][1])
     self.medicalRecords['CurrentMedicationType'] = tracenode.value[1]
     if self.medicalRecords['CurrentMedicationType'] != 5:
         self.params['SideEffect'] = random.beta(tracenode.value[2][0],tracenode.value[2][1])
     else:
         self.params['SideEffect'] = 0
     self.medicalRecords['MedicationCombination'] = tracenode.value[3]
Example No. 4
 def SettingCorrectParameters(self,tracenode):
     if self.medicalRecords['TreatmentOverallStatus'] == 'SEorIneffective':
                     (self.medicalRecords['IncidenceReaction']).append(self.medicalRecords['CurrentMedicationType'])
     self.params['IOPReduction'] = random.beta(tracenode.value[0][0],tracenode.value[0][1])
     self.medicalRecords['CurrentMedicationType'] = tracenode.value[1] 
     if self.medicalRecords['CurrentMedicationType'] != 5:
         self.params['SideEffect'] = random.beta(tracenode.value[2][0],tracenode.value[2][1])
     else:
         self.params['SideEffect'] = 0
     self.medicalRecords['MedicationCombination'] = tracenode.value[3]
Example No. 5
 def operations(self,tracenode):
     """
     Assign IOP reduction,Current med type, Side Effect and Med combi using
         the values from tree node. 
     """
     self.params['IOPReduction'] = random.beta(tracenode.value[0][0],tracenode.value[0][1])
     self.medicalRecords['CurrentMedicationType'] = tracenode.value[1] 
     if self.medicalRecords['CurrentMedicationType'] != 5:
         self.params['SideEffect'] = random.beta(tracenode.value[2][0],tracenode.value[2][1])
     else:
         self.params['SideEffect'] = 0
     self.medicalRecords['MedicationCombination'] = tracenode.value[3]
Example No. 6
 def __update_p(self, i, edges_in, node_pairs_in, edges_out, node_pairs_out, p_in, p_out):
     p_in_tmp = npr.beta(edges_in + self.__a_in, node_pairs_in + self.__b_in)
     if p_in_tmp > p_out[i-1]:
         p_in[i] = p_in_tmp
     else:
         p_in[i] = p_in[i-1]
     #update p_out with constraint that p_out < p_in
     p_out_tmp = npr.beta(edges_out + self.__a_out, node_pairs_out + self.__b_out)
     if p_out_tmp < p_in[i]:
         p_out[i] = p_out_tmp
     else:
         p_out[i] = p_out[i-1]
Example No. 7
 def generate_sample_path(self, n=1):
     deaths = self.event_table['observed']
     population = self.event_table['entrance'].cumsum() - self.event_table['removed'].cumsum().shift(1).fillna(0)
     d = deaths.shape[0]
     samples = 1. - beta(0.01 + deaths, 0.01 + population - deaths, size=(n, d))
     sample_paths = pd.DataFrame(np.exp(np.log(samples).cumsum(1)).T, index=self.timeline)
     return sample_paths
Example No. 8
def demo2():
	print("Demo2 : Beta + Gaussian Noise")
	x = nr.beta(1.5,1.5,[1000,1])
	y = x + nr.normal(0,0.3,[1000,1])
	print("I(X;Y) = ", ucmi.mi(x,y))
	print("UMI(X;Y) = ", ucmi.umi(x,y))
	print("CMI(X;Y) = ", ucmi.cmi(x,y))
Example No. 9
 def InitiateCataract(self):
     key = int((self.Attribute['Age'] -50)/5)
     cataractRisk = random.triangular(1.5,2.7,4.9)
     
     if self.medicalRecords['NumberTrabeculectomy'] == 0:  
         if self.Attribute['Gender'] == 1:
             if key < 7: 
                 RateCataract = (cataract_formation[key]/1000)
             else:
                 RateCataract = (cataract_formation[7]/1000)
         else:
             if key < 7: 
                 RateCataract = (cataract_formation_female[key]/1000)
             else:
                 RateCataract = (cataract_formation_female[7]/1000)
     else:
         if self.Attribute['Gender'] == 1:
             if key < 7: 
                 RateCataract = (cataract_formation[key]/1000)*cataractRisk
             else:
                 RateCataract = (cataract_formation[7]/1000)*cataractRisk
         else:
             if key < 7: 
                 RateCataract = (cataract_formation_female[key]/1000)*cataractRisk
             else:
                 RateCataract = (cataract_formation_female[7]/1000)*cataractRisk
                 
                 
     if random.uniform(0,1) < RateCataract:
         self.medicalRecords['Cataract'] = True
     if random.uniform(0,1) < random.beta(123,109) and self.medicalRecords['Cataract'] == True:
         self.medicalRecords['SurgeryCataract'] += 1
         self.medicalRecords['Cataract'] = False
Example No. 10
def generateIBP(customers, alpha=10, reducedprop=1.):
    """ Simple implementation of the Indian Buffet Process. Generates a binary matrix with
    customers rows and an expected number of columns of alpha * sum(1,1/2,...,1/customers).
    This implementation uses a stick-breaking construction.
    An additional parameter permits reducing the expected number of times a dish is tried. """
    # max number of dishes is distributed according to Poisson(alpha*sum(1/i))
    _lambda = alpha * sum(1. / array(list(range(1, customers + 1))))
    alpha /= reducedprop

    # we give it 2 standard deviations as cutoff
    maxdishes = int(_lambda + sqrt(_lambda) * 2) + 1

    res = zeros((customers, maxdishes), dtype=bool)
    stickprops = beta(alpha, 1, maxdishes) # nu_i

    currentstick = 1.
    dishesskipped = 0

    for i, nu in enumerate(stickprops):
        currentstick *= nu
        dishestaken = rand(customers) < currentstick * reducedprop
        if sum(dishestaken) > 0:
            res[:, i - dishesskipped] = dishestaken
        else:
            dishesskipped += 1

    return res[:, :maxdishes - dishesskipped]
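
A quick sanity check of the generator (hypothetical usage, assuming the NumPy names the function relies on, such as array, zeros, sqrt, beta and rand, are imported at module level):

Z = generateIBP(customers=20, alpha=5)
print(Z.shape)        # (20, number of dishes actually tried)
print(Z.sum(axis=1))  # how many dishes each customer sampled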
Example No. 11
def generate_mmsbm_data(N, K, alpha, a, b, m=None):
    """
    N is the number of nodes
    K is the number of blocks
    alpha is the concentration parameter
    a and b are the shape parameters
    m is the base measure
    """

    if m is None:
        m = ones(K) / K # uniform base measure

    Y = zeros((N, N), dtype=int) # edges

    # sample node-specific distributions over blocks

    [theta] = dirichlet(alpha * m, (1, N))

    # sample between- and within-block edge probabilities

    phi = beta(a, b, (K, K))

    # sample block assignments and edges

    for i in range(1, N+1):
        for j in range(1, N+1):
            idx = (categorical(theta[i-1,:]), categorical(theta[j-1,:]))
            Y[i-1,j-1] = uniform() <= phi[idx]

    return theta, Y
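
categorical is not a numpy.random function; the snippet presumably relies on a project helper that draws a single index from a discrete distribution. A minimal sketch of such a helper:

import numpy as np

def categorical(p):
    """Draw one index from the discrete distribution p (assumed to sum to 1)."""
    return np.random.choice(len(p), p=p)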
Example No. 12
def make_noisy_probs(exact, noise_type, noise):
    """Noisify probabilities
    Args: exact - 2D np.array, rows are options, cols are ppl
          noise_type - str, which noise model to apply
          noise - parameter of the chosen noise model
    Outputs: 2D np.array, rows are options, cols are ppl
    """
    if noise_type == "noiseless":
        return exact
    elif noise_type == "binomial":
        num_hypothetical_trials = noise
        num_successes = nr.binomial(num_hypothetical_trials, exact[0])
        noisy0 = num_successes / num_hypothetical_trials
        return np.array([noisy0, 1 - noisy0])
    elif noise_type == "beta":
        alpha_beta = jmutils.beta_shape(exact[0], noise)
        noisy0 = nr.beta(*alpha_beta)
        return np.array([noisy0, 1 - noisy0])
    elif noise_type == "truncnorm":
        scale = noise
        noisy0 = truncnorm.rvs(-exact[0] / scale, (1 - exact[0]) / scale, 
                               loc=exact[0], scale=scale)
        return np.array([noisy0, 1 - noisy0])
    elif noise_type == "log_odds":
        lo = np.log(exact[0] / exact[1])
        noisy_lo = nr.normal(lo, noise)
        given_1 = 1 / (math.exp(noisy_lo) + 1)
        return np.array([1 - given_1, given_1])
    else:
        print("Error: meta noise_type not specified correctly")
Example No. 13
def generate_sbm_data(N, K, alpha, a, b, m=None):
    """
    N is the number of nodes
    K is the number of blocks
    alpha is the concentration parameter
    a and b are the shape parameters
    m is the base measure
    """

    if m is None:
        m = ones(K) / K # uniform base measure

    Z = zeros((N, K)) # block assignments

    # sample (global) distribution over blocks

    [theta] = dirichlet(alpha * m, 1)

    # sample between- and within-block edge probabilities

    phi = beta(a, b, (K, K))

    # sample block assignments

    for n in range(1, N+1):
        Z[n-1,:] = multinomial(1, theta)

    # sample edges

    Y = (uniform(size=(N, N)) <= dot(dot(Z, phi), Z.T)).astype(int)

    return Z, Y
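
Run on their own, generate_mmsbm_data and generate_sbm_data need the NumPy names they reference; a plausible module header (an assumption about the original imports) would be:

from numpy import dot, ones, zeros
from numpy.random import beta, dirichlet, multinomial, uniform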
Example No. 14
def testDrawVarying ():
    #data = np.array ([8,5,4,2])
    data0 = rnd.exponential (10, 30)
    data1 = rnd.triangular (4, 5, 5, 20)
    data2 = rnd.triangular (10,11,11, 10)
    data3 = rnd.triangular (14,14,15, 5)
    data4 = rnd.triangular (0,1,1, 10)
    data = np.concatenate ((data0,data1,data2,data3,data4), axis=0)
    #data = data0

    data = rnd.beta (5,2,1000)
    data = [10 * x for x in data]

    #data0 = rnd.triangular (5, 5, 10, 100)
    #data1 = rnd.triangular (10, 20, 20, 100)
    #data2 = rnd.triangular (20, 21, 21, 0)
    #data = np.concatenate ((data0, data1, data2), axis=0)

    #data = rnd.triangular (0, 10, 15, 200)
    
    data = np.sort (data)
    #data = np.ceil (data, None)
    
    blocks = bucket (data, limit=0.1, depth=3, individualSigma = 2.0, maxExtremas=0, minHeight=0.0)
    draw (data, blocks=blocks)
Example No. 15
def thompson_sampling(variables,context):
	versions = context['versions']
	#import models individually to avoid circular dependency
	Variable = apps.get_model('engine', 'Variable')
	Value = apps.get_model('engine', 'Value')
	Version = apps.get_model('engine', 'Version')
	version_content_type = ContentType.objects.get_for_model(Version)
	#priors we set by hand - will use instructor rating and confidence in future
	prior_success = 1.9
	prior_failure = 0.1
	#max value of version rating, from qualtrics
	max_rating = 1

	version_to_show = None
	max_beta = 0

	for version in versions:
		student_ratings = Variable.objects.get(name='student_rating').get_data({'version': version}).all()
		rating_count = student_ratings.count()
		rating_average = student_ratings.aggregate(Avg('value'))
		rating_average = rating_average['value__avg']
		if rating_average is None:
			rating_average = 0
		else:
			rating_average = rating_average * 0.1

		#get instructor conf and use for priors later
		#add priors to db
		prior_success_db, created = Variable.objects.get_or_create(name='thompson_prior_success', content_type=version_content_type)
		prior_success_db_value = Value.objects.filter(variable=prior_success_db, object_id=version.id).last()
		if prior_success_db_value:
			#there is already a value, so update it
			prior_success_db_value.value = prior_success
			prior_success_db_value.save()
		else:
			#no db value
			prior_success_db_value = Value.objects.create(variable=prior_success_db, object_id=version.id, value=prior_success)

		prior_failure_db, created = Variable.objects.get_or_create(name='thompson_prior_failure', content_type=version_content_type)
		prior_failure_db_value = Value.objects.filter(variable=prior_failure_db, object_id=version.id).last()
		if prior_failure_db_value:
			#there is already a value, so update it
			prior_failure_db_value.value = prior_failure
			prior_failure_db_value.save()
		else:
			#no db value
			prior_failure_db_value = Value.objects.create(variable=prior_failure_db, object_id=version.id, value=prior_failure)
	

		#TODO - log to db later?
		successes = (rating_average * rating_count) + prior_success
		failures = (max_rating * rating_count) - (rating_average * rating_count) + prior_failure

		version_beta = beta(successes, failures)

		if version_beta > max_beta:
			max_beta = version_beta
			version_to_show = version

	return version_to_show
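
Stripped of the Django bookkeeping, the core of this routine is Beta-Bernoulli Thompson sampling: each version keeps pseudo-counts of successes and failures, one Beta draw is taken per version, and the largest draw wins. A self-contained sketch of that core (illustrative names, not the project's API):

import numpy as np

def thompson_pick(successes, failures):
    """Pick the arm with the largest draw from its Beta posterior."""
    draws = np.random.beta(np.asarray(successes) + 1, np.asarray(failures) + 1)
    return int(np.argmax(draws))

arm = thompson_pick([12, 30, 7], [48, 70, 13])  # index of the version to show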
Example No. 16
 def generate_samples(self, game, profile, sample_count):
     if profile not in self.profile_info:
         self.profile_info[profile] = {r: {s: {'offset': rand.normal(0, self.max_stdev*self.spread_mult), 
                 'stdev': rand.beta(2,1) * self.max_stdev} for s in profile[r]} for r in profile.keys()}
     return {r: [PayoffData(s, profile[r][s], game.getPayoff(profile, r, s) +
                 [rand.normal(choice(self.multipliers)*self.profile_info[profile][r][s]['offset'],
                              self.profile_info[profile][r][s]['stdev'])
                  for __ in range(sample_count)]) for s in profile[r].keys()] for r in profile.keys()}
Example No. 17
def simulate_BB(tot, mean_p, sigma):
    a = mean_p * (1/sigma**2 - 1)
    b = (1-mean_p) * (1/sigma**2 - 1)

    p = beta(a, b)
    counts = binomial(tot, p)
    #sys.stderr.write("%f %f %i\n"%(mean_p,p,counts))
    return counts, (tot-counts)
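
The shape parameters above come from moment matching: with a = mean_p*(1/sigma**2 - 1) and b = (1-mean_p)*(1/sigma**2 - 1), the Beta draws have mean mean_p and variance mean_p*(1-mean_p)*sigma**2. A quick numeric check (illustrative, using NumPy directly):

import numpy as np

mean_p, sigma = 0.3, 0.1
a = mean_p * (1 / sigma**2 - 1)
b = (1 - mean_p) * (1 / sigma**2 - 1)
draws = np.random.beta(a, b, size=100000)
print(draws.mean(), draws.var())  # near 0.3 and 0.3*0.7*0.01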
Example No. 18
def uniform_noise(max_half_width, samples):
    """
    Generate uniform random noise to add to one payoff in a game.

    max_half_width: maximum half-width of the uniform distribution
    samples: number of samples to take of every profile
    """
    hw = beta(2, 1) * max_half_width
    return U(-hw, hw, samples)
Example No. 19
 def _gem(self, gma):
     """
     Generate the stick-breaking process with parameter gma.
     """
     prev = 1
     while True:
         beta_k = beta(1, gma) * prev
         prev -= beta_k
         yield beta_k
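
The yielded weights form a GEM (stick-breaking) sequence and sum to 1 in the limit. A small standalone demonstration of the same construction, rewritten as a free function for illustration:

from numpy.random import beta

def gem(gma):
    prev = 1.0
    while True:
        beta_k = beta(1, gma) * prev  # break off a Beta(1, gma) fraction of the remaining stick
        prev -= beta_k
        yield beta_k

g = gem(2.0)
weights = [next(g) for _ in range(50)]
print(sum(weights))  # approaches 1 as more of the stick is broken off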
Example No. 20
def genRandom(size=3):
    #generate |size| random roc curves (xs=FP, ys=TP)
    xss,yss = [], []
    for _ in range(size):
        xs = [0.0,1.0]
        xprev = 0.0
        for _ in range(10):
            xs.append(xprev + choice([0.0,random.beta(1,10)*(1.-xprev)])) #put in some duplicate xs, as may happen from time to time
            xprev = xs[-1]
        xss.append(xs)
    for _ in range(size):
        ys = [0.0,1.0]
        yprev = 0.0
        for _ in range(10):
            ys.append(yprev + random.beta(1,5)*(1.-yprev))
            yprev = ys[-1]
        yss.append(ys)
    return xss,yss
Example No. 21
def assign_error(seq, mu):
	beta = 1
	alpha = (beta * mu) / (1 - mu)
	errors= npr.beta(alpha, beta, len(seq)) 
#	global PLOT
#	if PLOT:
#		plot_errors(errors)
#		PLOT = 0;
	# print errors
	return errors
Example No. 22
    def recruit_soldiers(self):
        dead_soldiers = []
        dead_soldiers = filter(lambda x:
                               (self.data[x] is None) or (self.data[x].alive is False),
                               self.get_rank(1))

        for mia in dead_soldiers:
            refbase, refscale, ss = self.top_rank, self.top_age - 1, 1
            aa = int(round(beta(1, refbase, 1) * refscale + 1))
            qq, ii = self.fill_quality_ideology()
            self.data[mia] = Soldier(1, ss, aa, qq, ii, mia)
Example No. 23
def stick(gamma, tol=1e-3):
    """
    Truncated sample from a dirichlet process using stick breaking
    """
    betas = []
    Z = 0.
    while 1 - Z > tol:
        new_beta = (1 - Z) * beta(1., gamma)
        betas.append(new_beta)
        Z += new_beta
    return {i: b / Z for i, b in enumerate(betas)}
Example No. 24
def generate_docs_with_hlda(num_docs, words_per_doc, vocab_size, topic_to_word_beta, topic_dist_m, topic_dist_pi, new_child_gamma):
    params = {}
    params["topic_to_word_param"] = [topic_to_word_beta] * vocab_size
    params["words_per_doc_distribution"] = util.Poisson(words_per_doc)
    pta = topic_dist_m * topic_dist_pi
    ptb = topic_dist_pi - pta
    params["parent_topic_bias_sample"] = lambda: beta(pta, ptb)
    params["new_child_gamma"] = new_child_gamma
    topic_root = Topic_node(params)
    documents = [generate_one_doc_with_hlda(topic_root, params) for i in range(num_docs)]
    return documents, topic_root
Example No. 25
def getnextsplit(versionTestResults,percentile,stopRatio,n=10000):
    #collect current conversion rates and MC results
    print(versionTestResults)
    ratioList = []
    MCDict = {}
    for version in versionTestResults:
        #get current conversion rates for all versions
        versionId = version.get("versionId")
        visits = version.get("visits")
        clicks = version.get("clicks")
        #conversion rate = clicks / visits
        ratioList.append(float(clicks)/float(visits))
        MCRateList = []
        for i in range(n):
            if clicks == 0:
                MCRateList.append(0)
            else:
                MCRateList.append(random.beta(clicks,visits-clicks+1))
        MCDict[versionId] = MCRateList
    MCData = pd.DataFrame(MCDict)
    #print ratioList
    #get version id
    versionIds = list(MCData.columns.values)
    #get the index of the best version
    bestVersionIndex = ratioList.index(max(ratioList))
    #convert simulation results from version id to index
    #for
    #MCData["max"] = versionIds.index(MCData.idxmax(axis = 1))
    maxVersionIndex = []
    for maxVersionId in list(MCData.idxmax(axis = 1)):
        maxVersionIndex.append(versionIds.index(maxVersionId))
    MCData["max"] = maxVersionIndex
    #print MCData
    #print MCData
    #print versionIds
    #get next split test info
    #nextSplitList = []
    nextSplitDict = {}
    for versionId in versionIds:
        #nextSplitDict = {}
        k = sum(MCData["max"] == MCData.columns.get_loc(versionId))
        #print k
        #nextSplitDict["versionId"] = versionId
        #nextSplitDict["ratio"] = float(k)/float(n)
        nextSplitDict[versionId] = float(k)/float(n)
        #nextSplitList.append(nextSplitDict)
    #calculate the remaining value of this test
    remainingValue = getremainingvalue(MCData,bestVersionIndex,percentile,n)
    #check whether to stop this test when the remaining value is too small
    bestVersionRate = ratioList[bestVersionIndex]
    remainingValue = getstopinfo(bestVersionRate,remainingValue,stopRatio)
    #print stopInfo
    #return({"nextSplitRatio":nextSplitList,"remainingValue":remainingValue})
    return({"nextSplitRatio":nextSplitDict,"remainingValue":remainingValue})
Example No. 26
def sample_eta_west(eta, nact, n0, a=1, b=0):
    """Samples the concentration parameter eta"""

    ## compute x, r and p
    x = rn.beta(eta + 1, n0)
    lx = np.log(x)
    r = (a + nact - 1) / (n0 * (b - lx))
    p = r / (r + 1)

    ## return
    return rn.gamma(a + nact, 1 / (b - lx)) if rn.rand() < p else rn.gamma(a + nact - 1, 1 / (b - lx))
Example No. 27
 def do_TS_rel_champ(self, champ):
     samples = zeros(self.nArms)
     for arm in range(self.nArms):
         if arm == champ:
             samples[arm] = 0.5
         else:
             a = self.RealWins[arm, champ]
             b = self.RealWins[champ, arm]
             samples[arm] = beta(a, b)
     challenger = samples.argmax()
     return challenger
Example No. 28
def simulate_BNB(mean, sigma, n):
    # sys.stderr.write("%g %g %g\n" % (mean, sigma, n))
    mean_p = np.float64(n) / (n+mean)
    sigma = (1 / sigma)**2
    a = mean_p * (sigma)+1
    b = (1 - mean_p)*sigma
    
    p = beta(a, b)
    #sys.stderr.write("%f %f\n"%(n,p))
    counts = negative_binomial(n, p)
    return counts
Example No. 29
 def __calculate_alpha(self, graph, alpha_prev):
     '''Picks alpha from a mixture of 2 gamma distributions'''
     num_communities = graph.number_of_communities
     num_players = graph.number_of_nodes
     beta_z = npr.beta(alpha_prev + 1, num_players)
     #generate a uniform random number to pick which gamma from the mixture
     rnd_unif = npr.uniform()
     inv_mixture_scale = self.__gamma_b - math.log(beta_z)
     mixture_scale = 1.0 / inv_mixture_scale
     if rnd_unif / (1 - rnd_unif) <  ((self.__gamma_a + num_communities)/
                                       (num_players * inv_mixture_scale)):
         return npr.gamma(self.__gamma_a + num_communities, mixture_scale)
     return npr.gamma(self.__gamma_a + num_communities - 1, mixture_scale)
Example No. 30
def generate_beta_null_table(colsums, a=1, b=1):
    from numpy.random import beta, binomial #,seed
    #colsums = [x==0 and x+1 or x for x in colsums]
    #allele1
    # for testing purposes only
    # seed(100)
    if colsums[0] == 0:
        n11 = 0
    else:
        p11 = float(beta(a, b))
        n11 = binomial(colsums[0], p11)
    n21 = colsums[0] - n11
    #allele2
    if colsums[1] == 0:
        n12 = 0
    else:
        p12 = float(beta(a, b))
        n12 = binomial(colsums[1], p12)
    n22 = colsums[1] - n12
    return n11, n12, n21, n22
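
Given the two column sums, the function fills a 2x2 table whose column-wise success probabilities are independent Beta(a, b) draws, so the column sums are preserved by construction. Hypothetical usage:

n11, n12, n21, n22 = generate_beta_null_table([25, 40], a=1, b=1)
print(n11 + n21, n12 + n22)  # always prints the column sums: 25 40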
Example No. 31
def thompson_sampling(variables, context):
    versions = context['mooclet'].version_set.all()
    #import models individually to avoid circular dependency
    Variable = apps.get_model('engine', 'Variable')
    Value = apps.get_model('engine', 'Value')
    Version = apps.get_model('engine', 'Version')
    # version_content_type = ContentType.objects.get_for_model(Version)
    #priors we set by hand - will use instructor rating and confidence in future
    prior_success = 19
    prior_failure = 1
    #max value of version rating, from qualtrics
    max_rating = 10

    version_to_show = None
    max_beta = 0

    for version in versions:
        student_ratings = Variable.objects.get(name='student_rating').get_data(
            {
                'version': version
            }).all()
        rating_count = student_ratings.count()
        rating_average = student_ratings.aggregate(Avg('value'))
        rating_average = rating_average['value__avg']
        if rating_average is None:
            rating_average = 0

        #get instructor conf and use for priors later
        #add priors to db
        prior_success_db, created = Variable.objects.get_or_create(
            name='thompson_prior_success')
        prior_success_db_value = Value.objects.filter(
            variable=prior_success_db, version=version).last()
        if prior_success_db_value:
            #there is already a value, so update it
            prior_success_db_value.value = prior_success
            prior_success_db_value.save()
        else:
            #no db value
            prior_success_db_value = Value.objects.create(
                variable=prior_success_db,
                version=version,
                value=prior_success)

        prior_failure_db, created = Variable.objects.get_or_create(
            name='thompson_prior_failure')
        prior_failure_db_value = Value.objects.filter(
            variable=prior_failure_db, version=version).last()
        if prior_failure_db_value:
            #there is already a value, so update it
            prior_failure_db_value.value = prior_failure
            prior_failure_db_value.save()
        else:
            #no db value
            prior_failure_db_value = Value.objects.create(
                variable=prior_failure_db,
                version=version,
                value=prior_failure)

        #TODO - log to db later?
        successes = (rating_average * rating_count) + prior_success
        failures = (max_rating * rating_count) - (rating_average *
                                                  rating_count) + prior_failure

        version_beta = beta(successes, failures)

        if version_beta > max_beta:
            max_beta = version_beta
            version_to_show = version

    return version_to_show
Example No. 32
    def aggrBanditTSRun(self, methodsResultDict, methodsParamsDF, topK=20):

      if sorted([mI for mI in methodsParamsDF.index]) != sorted([mI for mI in methodsResultDict.keys()]):
        raise ValueError("Arguments methodsResultDict and methodsParamsDF have to define the same methods.")

      if np.prod([len(methodsResultDict.get(mI)) for mI in methodsResultDict]) == 0:
        raise ValueError("Argument methodsResultDict contains an empty list of items for some method.")

      if topK < 0 :
        raise ValueError("Argument topK must be a positive value.")


      methodsResultDictI = methodsResultDict;
      methodsParamsDFI = methodsParamsDF;

      recommendedItemIDs = []

      for iIndex in range(0, topK):
        #print("iIndex: ", iIndex)
        #print(methodsResultDictI)
        #print(methodsParamsDFI)

        if len([mI for mI in methodsResultDictI]) == 0:
          return recommendedItemIDs[:topK];

        methodProbabilitiesDicI = {}

        # computing probabilities of methods
        for mIndex in methodsParamsDFI.index:
          #print("mIndexI: ", mIndex)
          methodI = methodsParamsDFI.loc[methodsParamsDFI.index == mIndex]#.iloc[0]
          # alpha + number of successes, beta + number of failures
          pI = beta(methodI.alpha0 + methodI.r, methodI.beta0 + (methodI.n - methodI.r), size=1)[0]
          methodProbabilitiesDicI[mIndex] = pI
        #print(methodProbabilitiesDicI)

        # get the max of the method probabilities
        maxPorbablJ = max(methodProbabilitiesDicI.values())
        #print("MaxPorbablJ: ", maxPorbablJ)

        # selecting method with highest probability
        theBestMethodID = random.choice([aI for aI in methodProbabilitiesDicI.keys() if methodProbabilitiesDicI[aI] == maxPorbablJ])
    
        # extract the results of the selected method (the one with the highest probability)
        resultsOfMethodI = methodsResultDictI.get(theBestMethodID)
        #print(resultsOfMethodI)
    
        # select next item (itemID)
        selectedItemI = self.exportRouletteWheelRatedItem(resultsOfMethodI)
        #selectedItemI = self.exportRandomItem(resultsOfMethodI)
        #selectedItemI = self.exportTheMostRatedItem(resultsOfMethodI)
    
        #print("SelectedItemI: ", selectedItemI)
    
        recommendedItemIDs.append((selectedItemI, theBestMethodID))

        # deleting selected element from method results
        for mrI in methodsResultDictI:
            try:
                methodsResultDictI[mrI].drop(selectedItemI, inplace=True, errors="ignore")
            except:
                #TODO some error recordings?
                pass
        #methodsResultDictI = {mrI:methodsResultDictI[mrI].append(pd.Series([None],[selectedItemI])).drop(selectedItemI) for mrI in methodsResultDictI}
        #print(methodsResultDictI)

        # methods with empty list of items
        methodEmptyI = [mI for mI in methodsResultDictI if len(methodsResultDictI.get(mI)) == 0]

        # removing methods with the empty list of items
        methodsParamsDFI = methodsParamsDFI[~methodsParamsDFI.index.isin(methodEmptyI)]

        # removing methods definition with the empty list of items
        for meI in methodEmptyI: methodsResultDictI.pop(meI)
      return recommendedItemIDs[:topK]
Example No. 33
 def _numpy(self, loc=0.0, scale=1.0, size=(1, )):
     return lambda: nr.beta(a=self.alpha, b=self.beta, size=size
                            ) * scale + loc
Example No. 34
def get_beta_sample(alpha):
    return beta(1, alpha)
Example No. 35
def sample_kappa(num_1_vec, num_0_vec, rho0, rho1):
    kappa_vec = beta(rho0 + num_1_vec, rho1 + num_0_vec)
    return kappa_vec
Example No. 36
 def pick(self, n_arms, history):
     # list of (# success, # failure)
     S_F = [(arm_record[0], arm_record[1] - arm_record[0])
            for arm_record in history]
     probs = [beta(s + 1, f + 1) for s, f in S_F]
     return argmax(probs)
Example No. 37
 def f():
     return beta(a, b)
Example No. 38
def beta_dist_2_10(num_examples):
    return [rnd.beta(2.0, 10.0) for _ in range(num_examples)]
Example No. 39
 def sample(self):
     reward = beta(self.a, self.b)
     return reward
Example No. 40
 def test_beta_small_parameters(self):
     # Test that beta with small a and b parameters does not produce
     # NaNs due to roundoff errors causing 0 / 0, gh-5851
     random.seed(1234567890)
     x = random.beta(0.0001, 0.0001, size=100)
     assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
Example No. 41
 def choose_next_arm(self):
     thetas = beta(self.success, self.failure)
     return np.argmax(thetas)
Example No. 42
def randPoissonBeta(params, n):

    x = beta(params.alpha, params.beta, n)
    p = poisson(x * params.gamma)

    return p
Example No. 43
 def sample(self):
     #return betavariate(self.params[1], self.params[0])
     return beta(self.params[1], self.params[0])
Example No. 44
        # data['meanCommunitySize'] = N.mean(map(len, valComm))
        # data['numCommunities'] = len(valComm)
        # data['influenceMoveCount'] = count
        # ===================================================================
        data['0:1 Distribution'] = zeroToOne(g)
        csvwr.writerow(data)
        # Save graph
        if output_json_graphs and count % numNodes == 0:
            save_to_jsonfile(fileName + '_iter_' + str(i) + '_gen_' + str(count) + '.json', g)
    f.close()


if __name__ == '__main__':
    args = parser.parse_args()
    Gs = []
    if debug_mode:
        print("Create network")
    G = human_social_network_iterations((30, 30), 50, False, random.beta, *beta_params[int(args.extraversion)])

    if debug_mode:
        print("Assign conformity values")
    for node in G.nodes():
        G.add_node(node, conformity=random.beta(*beta_params[int(args.conformity)]))
    if debug_mode:
        print("Save iterations of the graph")

    if debug_mode:
        print("Run DSIT")
    simulate(G, data_folder + 'graph_ext_' + args.extraversion + '_conf_' + args.conformity + '_simnum_' + args.sim_num,
             int(args.iterations))
Example No. 45
 def rbeta():
     return beta(1, gamma)
Example No. 46
if sys.argv[1] == "SC_1M_1N":
	priorfile = "N1\tN2\tNa\tTsplit\tTsc\tM12\tM21\n"
	for sim in range(nMultilocus):
		priorfile += "{0:.5f}\t{1:.5f}\t{2:.5f}\t{3:.5f}\t{4:.5f}\t{5:.5f}\t{6:.5f}\n".format(N1[sim], N2[sim], Na[sim], Tsplit[sim], Tsc[sim], M12[sim], M21[sim])
		for locus in range(nLoci):
			print("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6:.5f}\t{7:.5f}\t{8:.5f}\t{9:.5f}\t{10:.5f}\t{11:.5f}\t{12:.5f}\t{13:.5f}".format(nsam_tot[locus], theta[locus], rho[locus], L[locus], nsamA[locus], nsamB[locus], M12[sim], M21[sim], N1[sim], N2[sim], Tsc[sim], Tsplit[sim], Tsplit[sim], Na[sim]))
	outfile = open("priorfile.txt", "w")
	outfile.write(priorfile)
	outfile.close()

if sys.argv[1] == "SC_1M_2N":
	priorfile = "N1\tN2\tNa\tshape_N_a\tshape_N_b\tTsplit\tTsc\tM12\tM21\n"
	for sim in range(nMultilocus):
		priorfile += "{0:.5f}\t{1:.5f}\t{2:.5f}\t{3:.5f}\t{4:.5f}\t{5:.5f}\t{6:.5f}\t{7:.5f}\t{8:.5f}\n".format(N1[sim], N2[sim], Na[sim], shape_N_a[sim], shape_N_b[sim], Tsplit[sim], Tsc[sim], M12[sim], M21[sim])
		# vectors of size 'nLoci' containing parameters
		scalar_N = beta(shape_N_a[sim], shape_N_b[sim], size=nLoci)
		rescale = shape_N_a[sim] / (shape_N_a[sim] + shape_N_b[sim])
		N1_vec = [ N1[sim]*i/rescale for i in scalar_N ]
		N2_vec = [ N2[sim]*i/rescale for i in scalar_N ]
		Na_vec = [ Na[sim]*i/rescale for i in scalar_N ]
		for locus in range(nLoci):
			print("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6:.5f}\t{7:.5f}\t{8:.5f}\t{9:.5f}\t{10:.5f}\t{11:.5f}\t{12:.5f}\t{13:.5f}".format(nsam_tot[locus], theta[locus], rho[locus], L[locus], nsamA[locus], nsamB[locus], M12[sim], M21[sim], N1_vec[locus], N2_vec[locus], Tsc[sim], Tsplit[sim], Tsplit[sim], Na_vec[locus]))
	outfile = open("priorfile.txt", "w")
	outfile.write(priorfile)
	outfile.close()

if sys.argv[1] == "SC_2M_1N":
	if modeBarrier == "beta":
		priorfile = "N1\tN2\tNa\tTsplit\tTsc\tM12\tshape_M12_a\tshape_M12_b\tM21\tshape_M21_a\tshape_M21_b\n"
	else:
		priorfile = "N1\tN2\tNa\tTsplit\tTsc\tM12\tnBarriersM12\tM21\tnBarriersM21\n"
Example No. 47
 def echantillon(self):
     """A random sample from this Beta posterior."""
     return beta(self.N[1], self.N[0])
Example No. 48
 def select_action(self) -> int:
     samples = [
         beta(a=self._betas[a][0], b=self._betas[a][1])
         for a in range(self._n_arms)
     ]
     return np.argmax(samples)
Example No. 49
def get_action_value(s, f):
    return beta(s + 1, f + 1)
Example No. 50
    def generate(self,
                 N=None,
                 K=None,
                 hyperparams=None,
                 mode='predictive',
                 symmetric=True,
                 **kwargs):
        if mode == 'generative':
            self.update_hyper(hyperparams)
            alpha, gmma, delta = self.get_hyper()
            N = int(N)
            _name = self.__module__.split('.')[-1]
            if _name == 'immsb_cgs':
                # @todo: compute the variance for random simulation
                # Number of table in the CRF
                if symmetric is True:
                    m = alpha * N * (digamma(N + alpha) - digamma(alpha))
                else:
                    m = alpha * N * (digamma(2 * N + alpha) - digamma(alpha))

                # Number of class in the CRF
                K = int(gmma * (digamma(m + gmma) - digamma(gmma)))
                alpha = gem(gmma, K)

            i = 0
            while i < 3:
                try:
                    dirichlet(alpha, size=N)
                    i = 0
                    break
                except ZeroDivisionError:
                    # Sometimes improbable values!
                    alpha = gem(gmma, K)
                    i += 1

            # Generate Theta
            if i > 0:
                alpha = np.asarray(alpha, dtype=float)
                order = np.argsort(alpha)[::-1]  # indices sorted by decreasing weight
                _K = int(1 / 3. * len(alpha))
                alpha[order[:_K]] = 1
                alpha[order[_K:]] = 0
                theta = multinomial(1, alpha, size=N)
            else:
                theta = dirichlet(alpha, size=N)

            # Generate Phi
            phi = beta(delta[0], delta[1], size=(K, K))
            if symmetric is True:
                phi = np.triu(phi) + np.triu(phi, 1).T

            self._theta = theta
            self._phi = phi
        elif mode == 'predictive':
            try:
                theta, phi = self.get_params()
            except:
                return self.generate(N, K, hyperparams, 'generative',
                                     symmetric)
            K = theta.shape[1]

        pij = self.likelihood(theta, phi)

        # Treshold
        #pij[pij >= 0.5 ] = 1
        #pij[pij < 0.5 ] = 0
        #Y = pij

        # Sampling
        pij = np.clip(pij, 0, 1)
        Y = sp.stats.bernoulli.rvs(pij)

        #for j in xrange(N):
        #    print 'j %d' % j
        #    for i in xrange(N):
        #        zj = categorical(theta[j])
        #        zi = categorical(theta[i])
        #        Y[j, i] = sp.stats.bernoulli.rvs(B[zj, zi])
        return Y, theta, phi
Example No. 51
 def select(self):
     randoms = beta(1+self._win_of_arms, 1+self._loss_of_arms)
     return np.argmax(randoms)
Example No. 52
def get_random_duration(index: int):
    if index == 28 or index == 1:
        return 0
    return (rand.beta(a= 11.0/20.0, b= 28.0/8.0)*5)+2
Example No. 53
def get_beta_dist(num_events, num_trials, num_samples=50000):
    return beta(num_events + 1, num_trials - num_events + 1, num_samples)
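
The returned array holds draws from the Beta(num_events + 1, num_trials - num_events + 1) posterior, i.e. a uniform prior updated with the observed counts; a typical use (hypothetical) is a credible interval for the event rate:

import numpy as np

samples = get_beta_dist(num_events=12, num_trials=100)
print(np.percentile(samples, [2.5, 97.5]))  # 95% credible interval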
Example No. 54
##### MAIN ####

n = 1000 # size of the matrix
trials = 1000 # trials
a_values = [0.8, 1.5, 25.8] # values of the parameter a
b_values = [2,10] # values of the parameter b
alpha_values = [1,10,50,100] # values of the parameter alpha

for a in a_values: # a parameter for Beta
    for b in b_values: # b parameter for Beta
        for alpha in alpha_values: # alpha parameter 
            eig = []
            for k in range(trials):
            
                # random vectors (p,q)
                q = np.append(0,beta(alpha,alpha + a+b+2,n-1))
                p = beta(alpha+a+1,alpha + b+1,n)
            
                # matrix entries
                s = np.sqrt(p*(1-q))
                q = q[1:]
                p = p[:-1]
                t = np.sqrt((1-p)*q)
                
                B = np.diag(s) + np.diag(t,-1)
                J = np.dot(B,B.transpose())
                # eigenvalues computations
                eig = np.append(eig, LA.eigvalsh(J))
            
            plt.hist(eig,bins= 300, density = 1, alpha = 0.7, color = 'r')
            plt.title(r'$\alpha = %.1f, \, a = %.1f,\, b = %.1f$' %(alpha,a,b))
Example No. 55
def simulateUtilityScore(N, VehicleShare, NumericalAttributes, CategoricalAttributes):

    mcost_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    mcost_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    mcost_utility_M = ((-2e-3 * gamma(mcost_k_M, mcost_t_M, [len(NumericalAttributes['monthly_cost']), N[0]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_M -= mcost_utility_M.mean(axis=0)

    mcost_k_X = 100 * beta(a=4., b=2, size=N[1])
    mcost_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    mcost_utility_X = ((-1.6e-3 * gamma(mcost_k_X, mcost_t_X, [len(NumericalAttributes['monthly_cost']), N[1]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_X -= mcost_utility_X.mean(axis=0)

    mcost_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    mcost_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    mcost_utility_B = ((-1.8e-3 * gamma(mcost_k_B, mcost_t_B, [len(NumericalAttributes['monthly_cost']), N[2]]))
                       * np.array(NumericalAttributes['monthly_cost'])[:, np.newaxis])
    mcost_utility_B -= mcost_utility_B.mean(axis=0)

    mcost_utility = np.hstack([mcost_utility_M, mcost_utility_X, mcost_utility_B]).T

    upcost_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    upcost_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    upcost_utility_M = ((-2e-5 * gamma(upcost_k_M, upcost_t_M, [len(NumericalAttributes['upfront_cost']), N[0]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_M -= upcost_utility_M.mean(axis=0)

    upcost_k_X = 100 * beta(a=4., b=2, size=N[1])
    upcost_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    upcost_utility_X = ((-1e-5 * gamma(upcost_k_X, upcost_t_X, [len(NumericalAttributes['upfront_cost']), N[1]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_X -= upcost_utility_X.mean(axis=0)

    upcost_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    upcost_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    upcost_utility_B = ((-1.5e-5 * gamma(upcost_k_B, upcost_t_B, [len(NumericalAttributes['upfront_cost']), N[2]]))
                       * np.array(NumericalAttributes['upfront_cost'])[:, np.newaxis])
    upcost_utility_B -= upcost_utility_B.mean(axis=0)

    upcost_utility = np.hstack([upcost_utility_M, upcost_utility_X, upcost_utility_B]).T

    term_k_M = 100 * beta(a=4.5, b=2, size=N[0])
    term_t_M = 0.1 * beta(a=3.5, b=2, size=N[0])
    term_utility_M = ((-1e-2 * gamma(term_k_M, term_t_M, [len(NumericalAttributes['term']), N[0]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_M -= term_utility_M.mean(axis=0)

    term_k_X = 100 * beta(a=4., b=2, size=N[1])
    term_t_X = 0.1 * beta(a=4.5, b=2, size=N[1])
    term_utility_X = ((-1.2e-2 * gamma(term_k_X, term_t_X, [len(NumericalAttributes['term']), N[1]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_X -= term_utility_X.mean(axis=0)

    term_k_B = 100 * beta(a=3.5, b=2, size=N[2])
    term_t_B = 0.1 * beta(a=5, b=2, size=N[2])
    term_utility_B = ((-1.2e-2 * gamma(term_k_B, term_t_B, [len(NumericalAttributes['term']), N[2]]))
                       * np.array(NumericalAttributes['term'])[:, np.newaxis])
    term_utility_B -= term_utility_B.mean(axis=0)

    term_utility = np.hstack([term_utility_M, term_utility_X, term_utility_B]).T


    worth_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    worth_t_M = 0.01 * beta(a=3.5, b=2, size=N[0])
    worth_utility_M = ((1.5e-5 * gamma(worth_k_M, worth_t_M, [len(NumericalAttributes['vehicle_worth']), N[0]]))
                     * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_M -= worth_utility_M.mean(axis=0)

    worth_k_X = 1000 * beta(a=4, b=2, size=N[1])
    worth_t_X = 0.01 * beta(a=4.5, b=2, size=N[1])
    worth_utility_X = ((1.5e-5 * gamma(worth_k_X, worth_t_X, [len(NumericalAttributes['vehicle_worth']), N[1]]))
                       * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_X -= worth_utility_X.mean(axis=0)

    worth_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    worth_t_B = 0.01 * beta(a=4, b=2, size=N[2])
    worth_utility_B = ((1.5e-5 * gamma(worth_k_B, worth_t_B, [len(NumericalAttributes['vehicle_worth']), N[2]]))
                       * np.array(NumericalAttributes['vehicle_worth'])[:, np.newaxis])
    worth_utility_B -= worth_utility_B.mean(axis=0)

    worth_utility = np.hstack([worth_utility_M, worth_utility_X, worth_utility_B]).T

    range_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    range_t_M = 0.007 * beta(a=4, b=2, size=N[0])
    range_utility_M = ((2e-3 * gamma(range_k_M, range_t_M, [len(NumericalAttributes['range']), N[0]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_M -= range_utility_M.mean(axis=0)

    range_k_X = 1000 * beta(a=4, b=2, size=N[1])
    range_t_X = 0.008 * beta(a=4, b=2, size=N[1])
    range_utility_X = ((2e-3 * gamma(range_k_X, range_t_X, [len(NumericalAttributes['range']), N[1]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_X -= range_utility_X.mean(axis=0)

    range_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    range_t_B = 0.008 * beta(a=4, b=2, size=N[2])
    range_utility_B = ((2e-3 * gamma(range_k_B, range_t_B, [len(NumericalAttributes['range']), N[2]]))
                       * np.array(NumericalAttributes['range'])[:, np.newaxis])
    range_utility_B -= range_utility_B.mean(axis=0)

    range_utility = np.hstack([range_utility_M, range_utility_X, range_utility_B]).T

    charge_k_M = 1000 * beta(a=4.5, b=2, size=N[0])
    charge_t_M = 0.007 * beta(a=4, b=2, size=N[0])
    charge_utility_M = ((2e-3 * gamma(charge_k_M, charge_t_M, [len(NumericalAttributes['charge']), N[0]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_M -= charge_utility_M.mean(axis=0)

    charge_k_X = 1000 * beta(a=4, b=2, size=N[1])
    charge_t_X = 0.008 * beta(a=4, b=2, size=N[1])
    charge_utility_X = ((2e-3 * gamma(charge_k_X, charge_t_X, [len(NumericalAttributes['charge']), N[1]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_X -= charge_utility_X.mean(axis=0)

    charge_k_B = 1000 * beta(a=3.5, b=2, size=N[2])
    charge_t_B = 0.008 * beta(a=4, b=2, size=N[2])
    charge_utility_B = ((2e-3 * gamma(charge_k_B, charge_t_B, [len(NumericalAttributes['charge']), N[2]]))
                       * np.array(NumericalAttributes['charge'])[:, np.newaxis])
    charge_utility_B -= charge_utility_B.mean(axis=0)

    charge_utility = np.hstack([charge_utility_M, charge_utility_X, charge_utility_B]).T

    energy_sig_M = 0.4 * beta(a=10, b=2, size=N[0])
    energy_mu_M = normal(loc=0.9, scale=0.1, size=N[0])
    energy_inter = (1 * normal(energy_mu_M, energy_sig_M, [1, N[0]]))
    energy_utility_M = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_sig_X = 0.4 * beta(a=10, b=2, size=N[1])
    energy_mu_X = normal(loc=0.9, scale=0.1, size=N[1])
    energy_inter = (1 * normal(energy_mu_X, energy_sig_X, [1, N[1]]))
    energy_utility_X = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_sig_B = 0.4 * beta(a=10, b=2, size=N[2])
    energy_mu_B = normal(loc=1.1, scale=0.1, size=N[2])
    energy_inter = (1.2 * normal(energy_mu_B, energy_sig_B, [1, N[2]]))
    energy_utility_B = np.vstack([-1 * energy_inter, energy_inter]).T

    energy_utility = np.vstack([energy_utility_M, energy_utility_X, energy_utility_B])

    type_mix_M = binomial(n=1, p=0.4, size=N[0])
    type_sig_M = 2 * beta(a=10, b=2, size=N[0])
    type_mu_M = normal(loc=(3 * (type_mix_M - 0.5)), scale=0.1, size=N[0])
    type_inter = (0.5 * normal(type_mu_M, type_sig_M, [1, N[0]]))
    type_utility_M = np.vstack([type_inter, -1 * type_inter]).T

    type_mix_X = binomial(n=1, p=0.2, size=N[1])
    type_sig_X = 2 * beta(a=10, b=2, size=N[1])
    type_mu_X = normal(loc=(3 * (type_mix_X - 0.5)), scale=0.1, size=N[1])
    type_inter = (0.5 * normal(type_mu_X, type_sig_X, [1, N[1]]))
    type_utility_X = np.vstack([type_inter, -1 * type_inter]).T

    type_mix_B = binomial(n=1, p=0.4, size=N[2])
    type_sig_B = 2 * beta(a=10, b=2, size=N[2])
    type_mu_B = normal(loc=(3 * (type_mix_B - 0.5)), scale=0.1, size=N[2])
    type_inter = (0.5 * normal(type_mu_B, type_sig_B, [1, N[2]]))
    type_utility_B = np.vstack([type_inter, -1 * type_inter]).T

    type_utility = np.vstack([type_utility_M, type_utility_X, type_utility_B])
    type_utility = type_utility.clip(-5, 5)

    brand_scale = 10 * beta(a=10, b=2, size=sum(N))
    brand_alloc = dirichlet(10*np.array([0.06, 0.07, 0.07, 0.01, 0.09, 0.44, 0.16, 0.1]), size=sum(N))
    brand_utility = brand_alloc * brand_scale[:, np.newaxis]
    brand_utility -= brand_utility.mean(axis=1)[:, np.newaxis]

    utility = np.hstack([brand_utility, mcost_utility, upcost_utility, term_utility, worth_utility, range_utility,
                         charge_utility, energy_utility, type_utility])

    # simulate the current market share
    simCust = multinomial(1, list(VehicleShare.values()), sum(N))
    simProduct = [list(VehicleShare.keys())[x] for x in np.argmax(simCust, axis=1)]

    aug = pd.DataFrame(np.array([list(range(1, sum(N) + 1, 1)), ['Millenial'] * N[0] + ['Gen X'] * N[1]
                                 + ['Baby Boomer'] * N[2], simProduct]).T, columns = ['id', 'segment', 'current brand'])

    k = 3  # we start at three as first three columns reserved for id, segment and current brand
    topLevel = [0, 1, 2]
    topLevelLab = ['id', 'segment', 'current brand']
    bottomLevelLab = ['id', 'segment', 'current brand']
    dictAttributes = {**CategoricalAttributes, **NumericalAttributes}
    for col in ['id', 'brand', 'model', 'monthly_cost', 'upfront_cost', 'term',
                'vehicle_worth', 'range', 'charge', 'energy', 'vehicle_type']:
        if col not in ['id', 'model']:
            topLevel = topLevel + [k] * len(list(dictAttributes[col]))
            topLevelLab = topLevelLab + [col]
            bottomLevelLab = bottomLevelLab + [lvl for lvl in list(dictAttributes[col])]
            k += 1

    midx = pd.MultiIndex(levels=[list(topLevelLab), list(bottomLevelLab)],
                         codes=[topLevel, list(range(len(bottomLevelLab)))])

    utilityDf = pd.DataFrame(pd.concat([aug, pd.DataFrame(utility)], axis=1).values, columns=midx)

    return utilityDf
Example No. 56
                #print (j, total_count, tmp_cnt, ucb, len(candidate_dict))

            max_k_list = [
                k[0] for k in candidate_dict.items()
                if k[1] == max(candidate_dict.values())
            ]
            selected_arm = random.choice(max_k_list)
            #print (max_k_list, candidate_dict)
            #print ("selected", selected_arm)

        elif para_mode == 3:
            for j in candidate_dict:
                tmp_a = (tmp_arms[j][cluster_id]["success"] + 1) * b_ts
                tmp_b = (tmp_arms[j][cluster_id]["fail"] + 1) * b_ts
                candidate_dict[j] = beta(a=tmp_a, b=tmp_b)

            max_k_list = [
                k[0] for k in candidate_dict.items()
                if k[1] == max(candidate_dict.values())
            ]
            selected_arm = random.choice(max_k_list)

        elif para_mode == 4:
            for j in candidate_dict:
                tmp_cnt = tmp_arms[j][cluster_id]["success"] + tmp_arms[j][
                    cluster_id]["fail"]
                if tmp_cnt == 0:
                    tmp_arms[j][cluster_id]["B"] = np.identity(n_features)
                    tmp_arms[j][cluster_id]["mu"] = np.zeros(
                        n_features).reshape(n_features, 1)
Example No. 57
 def _resample_rho(self):
     M = np.array(self.ms)
     alpha_post = self.alpha / self.K + M.sum(axis=0)
     beta_post = 1.0 + (1 - M).sum(axis=0)
     self.rho = npr.beta(alpha_post, beta_post)
Example No. 58
                # Save graph
                if output_json_graphs and count % numNodes == 0:
                    save_to_jsonfile(
                        fileName + '_iter_' + str(i) + '_gen_' + str(count) +
                        '.json', g)
    f.close()


if __name__ == '__main__':
    args = parser.parse_args()
    Gs = []
    if debug_mode:
        print("Create network")
    G = human_social_network_iterations((30, 30), 50, False, random.beta,
                                        *beta_params[int(args.extraversion)])

    if debug_mode:
        print("Assign conformity values")
    for node in G.nodes():
        G.add_node(node,
                   conformity=random.beta(*beta_params[int(args.conformity)]))
    if debug_mode:
        print("Save iterations of the graph")

    if debug_mode:
        print("Run DSIT")
    simulate(
        G, data_folder + 'graph_ext_' + args.extraversion + '_conf_' +
        args.conformity + '_simnum_' + args.sim_num + '_power_' + args.power,
        int(args.iterations))
Example No. 59
def get_p_dist(num_donations, num_impressions):
    return beta(num_donations + 1, num_impressions - num_donations + 1, 5000)
Example No. 60
def tag_tree(subtree, nodelist, father_tag, leaf_distr, percentage,
             beta_distribution_parameters):
    """Function tags all nodes of a given (binary) subtree with names FL or P."""
    # Arguments:
    #   subtree
    #                                       0   1       2           3           4
    #   nodelist                        - [id, depth, originaltag, finaltag, calc[taglist]]
    #   father_tag                      - 0 or 1 (FL or P)
    #   leaf_distr                      - [#FL, #P] - distribution of FL and P in the leave nodes
    #   percentage                      - [realP, percentage_P, percentage_FL]
    #   beta_distribution_parameters    - [percentage parasites, A_FL, B_FL, A_P, B_P]

    # parameters:
    pp = beta_distribution_parameters[0]
    #   for freeliving_distribution
    A_FL = beta_distribution_parameters[1]
    B_FL = beta_distribution_parameters[2]
    #   for parasite_distribution
    A_P = beta_distribution_parameters[3]
    B_P = beta_distribution_parameters[4]

    depth = -1
    if father_tag == 0:
        # freeliving_distribution:
        new_random = random.beta(a=A_FL, b=B_FL)
    else:
        # parasite_distribution:
        new_random = random.beta(a=A_P, b=B_P)

    tag = 0  # -> FL
    if new_random < pp:
        tag = 1  # -> P
    #               [id, depth, originaltag, finaltag, calc[taglist]]
    nodelist.append([subtree.name, depth, tag, '', []])
    subtree.name = subtree.name + "$" + str(len(nodelist) - 1)
    current_list_index = len(nodelist) - 1
    # if leaf node, then depth = 1, set finaltag, increase leaf distribution
    if subtree.is_terminal():
        depth = 1
        uniform_random = random.uniform()  # choose if we want to delete ourselves
        # unknown node?
        if (tag == 1) and (uniform_random <= percentage[1]):
            nodelist[current_list_index][4].append(
                [tag])  # set start tag for calculation
        else:
            if (tag == 0) and (uniform_random <= percentage[2]):
                nodelist[current_list_index][4].append(
                    [tag])  # set start tag for calculation
            else:
                nodelist[current_list_index][4].append(
                    [0, 1])  # set start tag for calculation
        # count FL & P:
        if tag == 0:
            leaf_distr[0] = leaf_distr[0] + 1
        else:
            leaf_distr[1] = leaf_distr[1] + 1
    else:
        child_depth = 0
        for clade in subtree.clades:
            result = tag_tree(clade, nodelist, tag, leaf_distr, percentage,
                              beta_distribution_parameters)
            clade = result[0]
            nodelist = result[1]
            leaf_distr = result[2]
            child_depth = child_depth + result[3]
        depth = child_depth / len(subtree.clades) + 1
    nodelist[current_list_index][1] = depth
    return [subtree, nodelist, leaf_distr, depth]