def main(args):
    request_id = 0
    fake = Faker()
    fake.seed(0)
    with open(filename, "w+") as f:
        f.write("request id|client name|room type|request type|start date|end date|#adults|#children\n")
        for i in range(0, number_of_lines):
            request_id += 1
            client_name = fake.name()
            room_type = random.choice(list(data.rooms.keys()))  # list() needed: dict views aren't indexable on Python 3
            request_type = random.choice(["wedding", "party", "conference"]) if "conference" in room_type else random.choice(["holiday", "business"])
            start_date = data.random_date_between(datetime(2016, 1, 1).date(), datetime(2016, 3, 31).date())
            end_date = start_date + timedelta(1 + int(random.gammavariate(2, 2)))
            num_adults = max(1, int(random.betavariate(2, 5) * 10))
            num_children = int(random.betavariate(1, 5) * 10)
            if request_type == "conference":
                num_adults = max(1, int(random.normalvariate(25, 9)))
                num_children = 0
            elif request_type == "wedding":
                num_adults = max(2, int(random.normalvariate(25, 9)))
                num_children  = max(0, int(random.normalvariate(25, 12)))
            elif request_type == "party":
                num_adults = max(1, int(random.normalvariate(25, 9)))
                num_children  = max(0, int(random.normalvariate(25, 12)))
            elif request_type == "business":
                num_children //= 2  # integer division keeps the count a whole number
            f.write("{}|{}|{}|{}|{}|{}|{}|{}\n".format(request_id, client_name,
                room_type, request_type, start_date, end_date, num_adults,
                num_children))
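The adult and child counts above lean on the Beta shape parameters: betavariate(2, 5) skews toward small parties, while betavariate(1, 5) makes zero children the most common case. A self-contained look at the adult-count distribution (standard library only):

import collections
import random

random.seed(0)
counts = collections.Counter(max(1, int(random.betavariate(2, 5) * 10))
                             for _ in range(10000))
for size in sorted(counts):
    print(size, counts[size])  # peaks around 1-2 adults, tails off toward 9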
Example no. 2
 def valueAt(self, evaluationTime):
     alpha = evaluateAt(self.alpha, evaluationTime)
     beta = evaluateAt(self.beta, evaluationTime)
     if self.rng.random() > 0.5:
         return 1.0 - random.betavariate(alpha, beta)
     else:
         return random.betavariate(alpha, beta)
Example no. 3
def showLuckGraph():
    name = str(window.charList.currentText())
    name2 = str(window.char2List.currentText())
    data1 = []
    data2 = []
    for x in range(0, 20):
        char1Roll = 20 * round(random.betavariate(dataFiles.Characters[name]['luck'], 1), 1)
        char2Roll = 20 * round(random.betavariate(dataFiles.Characters[name2]['luck'], 1), 1)
        data1.append(char1Roll)
        data2.append(char2Roll)
    crits1 = []
    crits2 = []
    for i in data1:
        if i == 20:
            crits1.append(i)
    for i in data2:
        if i == 20:
            crits2.append(i)

    output = "CHAR1 CRITS: "+ str(len(crits1))+" FOR "+str(len(data1))+" ROLLS"
    output += "\nCHAR 2CRITS: "+ str(len(crits2))+" FOR "+str(len(data2))+" ROLLS"
    showOutput(output)


    #pw = pg.plot(data, pen='r')   # data can be a list of values or a numpy array
    #pw.plot(data2,pen='b')
    graphDialog.pGraph.clear()
    graphDialog.pGraph.plot(data1, pen='r')
    graphDialog.pGraph.plot(data2, pen='b')
    log ("Luck graph plotted.")
Example no. 4
def populate(num_srs = 10, num_users = 1000, num_links = 100, num_comments = 20, num_votes = 50):
    try:
        a = Account._by_name(g.system_user)
    except NotFound:
        a = register(g.system_user, "password", "127.0.0.1")

    srs = []
    for i in range(num_srs):
        name = "reddit_test%d" % i
        try:
            sr = Subreddit._new(name = name, title = "everything about #%d"%i,
                                ip = '0.0.0.0', author_id = a._id)
            sr._downs = 10
            sr.lang = "en"
            sr._commit()
        except SubredditExists:
            sr = Subreddit._by_name(name)
        srs.append(sr)

    accounts = []
    for i in range(num_users):
        name_ext = ''.join([ random.choice(string.letters)
                             for x
                             in range(int(random.uniform(1, 10))) ])
        name = 'test_' + name_ext
        try:
            a = register(name, name, "127.0.0.1")
        except AccountExists:
            a = Account._by_name(name)
        accounts.append(a)

    for i in range(num_links):
        id = random.uniform(1,100)
        title = url = 'http://google.com/?q=' + str(id)
        user = random.choice(accounts)
        sr = random.choice(srs)
        l = Link._submit(title, url, user, sr, '127.0.0.1')
        queries.new_link(l)

        comments = [ None ]
        for i in range(int(random.betavariate(2, 8) * 5 * num_comments)):
            user = random.choice(accounts)
            body = ' '.join([ random_word(1, 10)
                              for x
                              in range(int(200 * random.betavariate(2, 6))) ])
            parent = random.choice(comments)
            (c, inbox_rel) = Comment._new(user, l, parent, body, '127.0.0.1')
            queries.new_comment(c, inbox_rel)
            comments.append(c)
            for i in range(int(random.betavariate(2, 8) * 10)):
                another_user = random.choice(accounts)
                queries.queue_vote(another_user, c, True, '127.0.0.1')

        like = random.randint(50,100)
        for i in range(int(random.betavariate(2, 8) * 5 * num_votes)):
           user = random.choice(accounts)
           queries.queue_vote(user, l, random.randint(0, 100) <= like, '127.0.0.1')

    queries.worker.join()
Example no. 5
    def __init__(self, parents=None, separators=None, vision=None, memory=None, responses=None, heuristics=None):
        self.parents = parents
        self.generation = 0
        if self.parents is not None:
            self.generation = parents.generation + 1
        self.living_instantiations = 0  # add one for each birth with this genome, subtract one for each death
        self.instantiations = 0  # add one for each birth with this genome, DO NOT subtract for deaths
        if vision is None:
            self.vision = abs(int(random.gauss(0, 2)))
        else:
            self.vision = vision
        if memory is None:
            self.memory = abs(int(random.gauss(0, 2)))
        else:
            self.memory = memory
        if separators is None:
            num_separators = abs(int(random.gauss(0, 1))) + 1
            self.separators = []
            # TODO Problem A
            for i in range(num_separators):
                self.separators = self.separators + [random_sep()]
            self.separators.sort()
        else:
            self.separators = separators
        if responses is None:
            # len(self.separators) avoids a NameError when separators were passed in
            if self.memory != 0 and len(self.separators) > 0:
                num_responses = int(1 / random.betavariate(2, 1)) - 1
                self.responses = []
                for i in range(num_responses):
                    # TODO ensure that there are no responses to the same
                    #     coordinates or to empty coordinates
                    num_coordinates = random.choice(range(self.memory * self.vision + 2)[1:])
                    coords = tuple(random_configuration(self.vision, self.memory, self.separators) for i in range(num_coordinates))
                    agent_action = random_action()
                    if len(coords) > 0:
                        self.responses.append((coords, agent_action))
            else:
                self.responses = []
        else:
            self.responses = responses
        if heuristics is None:
            self.heuristics = []
            num_heuristics = int(1 / random.betavariate(2, 1))
            for i in range(num_heuristics):
                agent_action = random_action()
                self.heuristics.append(agent_action)
            if num_heuristics == 0:
                self.heuristics.append(random_action())
        else:
            self.heuristics = heuristics

        assert len(self.heuristics) > 0, pdb.set_trace()  # drops into the debugger if the assertion fails
        self.responses, self.heuristics = prune(self.responses, self.heuristics, self.vision, self.memory, self.separators)
        self.complexity = self.complexity_estimate()
        # assign the minimum fitness of parents
        self.fitness = 0
        self.reproductions = 0
Example no. 6
    def die(self,aArea,vPInParameters,vPMoveParameters,aPDie):
        # Remove the mosquito from the target
        if self.inside == 1:
            self.target.removeInsideMosquito(self)
        else:
            self.target.removeOutsideMosquito(self)

        # Select a random target for the mosquito and create it
        # Get PIn parameters
        cPInHeterogeneityIndicator = vPInParameters[0]
        cPInMaleAll = vPInParameters[1]
        cPInMaleBetaA = vPInParameters[2]
        cPInMaleBetaB = vPInParameters[3]
        cPInFemaleAll = vPInParameters[4]
        cPInFemaleBetaA = vPInParameters[5]
        cPInFemaleBetaB = vPInParameters[6]

        # Get the PMove parameters
        cPMoveHeterogeneityIndicator = vPMoveParameters[0]
        cPMoveMaleAll = vPMoveParameters[1]
        cPMoveMaleBetaA = vPMoveParameters[2]
        cPMoveMaleBetaB = vPMoveParameters[3]
        cPMoveFemaleAll = vPMoveParameters[4]
        cPMoveFemaleBetaA = vPMoveParameters[5]
        cPMoveFemaleBetaB = vPMoveParameters[6]

        if self.getSex()=="male":
            vTargets = aArea.getSwarmList()
            if cPInHeterogeneityIndicator == 0:
                aPIn = cPInMaleAll
            else:
                aPIn = random.betavariate(cPInMaleBetaA,cPInMaleBetaB)
            if cPMoveHeterogeneityIndicator == 0:
                aPMove = cPMoveMaleAll
            else:
                aPMove = random.betavariate(cPMoveMaleBetaA,cPMoveMaleBetaB)
        else:
            vTargets = aArea.getHouseList()
            if cPInHeterogeneityIndicator == 0:
                aPIn = cPInFemaleAll
            else:
                aPIn = random.betavariate(cPInFemaleBetaA,cPInFemaleBetaB)
            if cPMoveHeterogeneityIndicator == 0:
                aPMove = cPMoveFemaleAll
            else:
                aPMove = random.betavariate(cPMoveFemaleBetaA,cPMoveFemaleBetaB)

        CRandIndex = random.randint(0,len(vTargets)-1)
        aMosquito = mosquito(vTargets[CRandIndex],self.getSex(),aPIn,aPMove,aPDie)

        # Move new mosquito inside with probability
        cRandIn = random.random()
        if cRandIn < aPIn:
            aMosquito.moveInside()
Example no. 7
def getTrueValue_beta(true_value_set, multiplier):
    # Pick the true value using a beta distribution.
    # Note: processed this way, the beta tends toward an exponential-looking
    # curve, so we named it the EXP distribution.
    rndm_nb = int(random.betavariate(1, 2) * multiplier)
    value_index = int((rndm_nb * len(true_value_set)) / multiplier)
    # Redraw until the index is valid; this works when len(true_value_set)
    # is less than 100 or the multiplier.
    while value_index < 0 or value_index >= len(true_value_set):
        rndm_nb = int(random.betavariate(1, 2) * multiplier)
        value_index = int((rndm_nb * len(true_value_set)) / multiplier)

    value = true_value_set[value_index]
    # Do not remove the chosen value; it could be chosen again.
    return value
Example no. 8
    def inversion(self):
        ''' simulate twin-priming inversion, return header note, seq '''
        offset = int(.3*len(self.dna)*betavariate(1.5,2))
        inv_pt = len(self.dna) - offset                        # inversion point
        t_start = int((len(self.dna)-offset)*betavariate(4,1)) # start relative to insertion ref
        delta = int(uniform(-10,10))                           # internal dup/del
        
        note = 'trunc_start=%d,inv_loc=%d,inv_slop=%d,ins_len=%d' % (t_start, inv_pt, delta, len(self.dna))

        end5p = self.dna[t_start:inv_pt]
        end3p = self.dna[inv_pt-delta:len(self.dna)]

        return note, rc(end5p) + end3p
Example no. 9
def generate_user_file(
        limit, artists_per_user_limit=100, artist_id_limit=1000000):
    """
    generates files like
    4587|547:1|6984:0.98|147856:0.05
    uid|artist_id:score|artist_id:score
    """
    with open(user_file, 'w+') as f:
        for i in range(limit):
            line = [str(i)]
            nb_artists = int(betavariate(2, 2) * artists_per_user_limit)
            for _ in range(nb_artists):
                line.append(str(int(
                    betavariate(2, 5) * artist_id_limit)) + ':' + str(random()))
            f.write('|'.join(line) + '\n')
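generate_user_file depends on a module-level user_file path and from-style imports of betavariate and random; a hedged usage sketch under those assumptions:

from random import betavariate, random

user_file = 'users.psv'  # assumed module-level constant in the original
generate_user_file(limit=3)
print(open(user_file).read())  # three uid|artist_id:score|... lines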
Example no. 10
def thompson_sampling(p, numSamples):
    ###########################################################################
    # Thompson Sampling algorithm
    #
    # a = number of successes of each variant
    # b = number of failures of each variant
    # Initialize priors with the ignorant state Beta(1,1) (uniform distribution)
    #
    a = np.ones(np.size(p))
    b = np.ones(np.size(p))

    for n in range(numSamples):
        # draw from the beta distribution for each variant
        draw = np.zeros(np.size(p))
        for i in range(np.size(a)):
            draw[i] = random.betavariate(a[i], b[i])

        # Select the variant with the largest draw from the beta distribution
        # (argmax returns a single index even when draws tie)
        selected_arm = np.argmax(draw)

        # Test and observe the result of the selected arm
        U = random.random()
        success = U < p[selected_arm]
        failure = not success

        # Update the prior beta distribution for the selected arm
        a[selected_arm] += success
        b[selected_arm] += failure

    return a, b
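A hedged usage sketch for the sampler above (the arm probabilities in p are invented for illustration): after a few thousand rounds the posterior mean of the best arm sits near its true rate, and that arm holds most of the pulls.

import numpy as np
import random

random.seed(1)
p = np.array([0.2, 0.5, 0.8])
a, b = thompson_sampling(p, 5000)
print(a / (a + b))  # posterior means; the last arm should be close to 0.8
print(a + b - 2)    # pulls per arm; the last arm should dominate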
Example no. 11
def simulate(n, alpha, iterations):
	SUM=0
	for j in alpha:
		SUM+=j
	l=len(alpha)
	avgValues = list()
	for i in range(0, l):
		avgValues.append(0)
	for i in range(iterations):
		alphaSum=SUM
		variates=list()
		for j in range(l-1):
			variates.append(random.betavariate(alpha[j], alphaSum-alpha[j]))
			alphaSum-=alpha[j]
		# print(variates)  # uncomment to print the variates
		prod=1
		sample = list() 
		for j in range(l-1):
			sample.append(variates[j]*prod)
			avgValues[j]+=variates[j]*prod
			prod*=(1-variates[j])
		avgValues[l-1]+=(1-sum(sample)) #the order is important here...
		sample.append(1-sum(sample))
	for i in range(0,l):
		avgValues[i]/=iterations
	print(avgValues)
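One way to sanity-check the stick-breaking loop above: the printed averages should approach alpha[i] / sum(alpha), the Dirichlet mean. A hedged usage sketch (the first argument n is unused by the function body):

import random

random.seed(42)
# For alpha = (1, 2, 3), the Dirichlet mean is (1/6, 1/3, 1/2),
# so the printed averages should be near (0.167, 0.333, 0.5).
simulate(0, [1.0, 2.0, 3.0], 100000)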
Example no. 12
    def random():
        names = RACES
        colours = COLOURS
        languages = LANGUAGES

        origins = Origins()

        # Draw a beta-distributed count between 1 and len(names), skewed toward smaller values. This allows a random number of different origins.
        number_of_ethnicities = int(random.betavariate(2, 3) * len(names)) + 1
        ethnicities = random.sample(names, number_of_ethnicities)

        for i, this_ethnicity in enumerate(ethnicities):
            others = names.copy()
            others.remove(this_ethnicity)

            liked_ethnicities = random.sample(others, number_of_ethnicities-1)
            hated_ethnicities = random.sample(others, number_of_ethnicities-1)

            eth = Ethnicity(this_ethnicity, colours[i], languages[i], liked_ethnicities, hated_ethnicities)

            origins[eth] = random.uniform(0, 1)

        total_weight = sum(o[1] for o in origins.items())
        for key in origins.keys():
            origins[key] /= total_weight

        return origins
Example no. 13
 def _decorator(citation_id):
     t = random.betavariate(cfg.SLEEP_ALPHA, cfg.SLEEP_BETA) \
             * (cfg.SLEEP_TIME_RANGE[1] - cfg.SLEEP_TIME_RANGE[0]) + cfg.SLEEP_TIME_RANGE[0]
     time.sleep(t)
     text = func(citation_id)
     # time.sleep(random.uniform(cfg.SLEEP_TIME_RANGE[0], cfg.SLEEP_TIME_RANGE[1]))
     return text
Example no. 14
    def _arm_guess(self, participant_count, completed_count):
        fairness_score = 7

        a = max([participant_count, 0])
        b = max([participant_count - completed_count, 0])

        return random.betavariate(a + fairness_score, b + fairness_score)
Example no. 15
def generateBetaRVs(alpha, beta, count, histDelta):

    rVec = numpy.zeros(count)
    rMax = 0.
    for ii in range(count):
        rVec[ii] = random.betavariate(alpha, beta)
        if (rMax < rVec[ii]):
            rMax = rVec[ii]

    # build the histogram ...
    deltaR = histDelta
    rMinHist = 0.
    rMaxHist = rMax + 2. * deltaR
    numBins = (rMaxHist - rMinHist) / deltaR
    numBins = int(numBins) + 2

    rHist = numpy.zeros(numBins)

    print ' making histogram ... ', deltaR, rMaxHist
    for ii in range(count):
        iBin = int((rVec[ii] - rMinHist) / deltaR + 0.0001)
        rHist[iBin] += 1

    for iBin in range(numBins):
        rHist[iBin] /= float(count)

    return (rVec, rHist)
Example no. 16
 def _beta_distrib(self):
     """
     Define a pseudorandom size according to a beta distribution giving alpha and beta,
     comprise between sonic_min and sonic_max
     @return A size of the fragment (int)
     """
     return int(betavariate(self.alpha, self.beta) * (self.sonic_max - self.sonic_min) + self.sonic_min)
Example no. 17
    def __init__(self, n_ants, n_sites, search_prob, quorum_size, site_qual, test = False):
        
        self.n_ants  = n_ants
        self.site_quals = [None]*n_sites
        for site in range(n_sites):
            if site_qual == 'random':
                self.site_quals[site] = random.betavariate(1,1)
            else:
                assert site_qual > 0 and site_qual < 1
                self.site_quals[site] = site_qual
        self.n_sites = len(self.site_quals)
        self.search_prob = search_prob        

        self.ants = [[AT_HOME, None] for i in range(self.n_ants)]
        
        self.at_home = dict(zip(range(self.n_ants), self.n_ants*[True]))
        self.at_site = [0]*self.n_sites
        self.know_site = [0]*self.n_sites
        self.quorum_size = quorum_size
        
        self.going_home = {}
        self.going_to_site = {}
        
        self.quorum_times = {}
        
        self.test = test
Example no. 18
def get_random_date():
  # http://www.wolframalpha.com/input/?i=beta+distribution%2C+alpha%3D1.5%2C+beta%3D5
  random_year = date.today().year - int(MAX_YEARS_BACK * random.betavariate(1.1, 7))

  start_date = date(random_year, 1, 1).toordinal()
  end_date = date(random_year, 12, 31).toordinal()
  return date.fromordinal(random.randint(start_date, end_date))
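Since betavariate(1.1, 7) is strongly skewed toward 0, most generated dates land in the most recent years. A self-contained check (MAX_YEARS_BACK is defined elsewhere in the original; 30 is an assumed value for illustration):

import random
from collections import Counter
from datetime import date

random.seed(0)
MAX_YEARS_BACK = 30  # assumed value; the original constant lives elsewhere
years = Counter(date.today().year - int(MAX_YEARS_BACK * random.betavariate(1.1, 7))
                for _ in range(10000))
print(years.most_common(5))  # the current and immediately preceding years dominate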
Example no. 19
def bursty_log_lines():
    cans=0
    log=logging.getLogger("raid_sprayer")
    while True:
        cans+=1
        log.debug("i am debugging something, i have used %09d cans of raid",cans)
        yield from asyncio.sleep(random.betavariate(5,1)* 10)
Example no. 20
def psa(parameters, parnames, predictions, prednames, ss):
    with open(os.path.join(rootDir, 'output', 'PSA_' + SA_id + '_wide.txt'), 'wb') as outfile_wide,\
        open(os.path.join(rootDir, 'output', 'PSA_' + SA_id + '_long.txt'), 'wb') as outfile_long:
        wide_writer = csv.writer(outfile_wide, delimiter='\t')
        wide_writer.writerow(parnames + prednames)
        long_writer = csv.writer(outfile_long, delimiter='\t')
        long_writer.writerow(['parname', 'parvalue', 'outcome', 'value'])
        for sample in range(n_samples):
            parvalues = []
            for param in parameters:
                if not param['distribution']:
                    continue
                distribution = param['distribution'].split(':')
                if distribution[0] == 'triangular':
                    value = random.triangular(float(distribution[1]), float(distribution[2]), float(distribution[3]))
                elif distribution[0] == 'uniform':
                    value = random.uniform(float(distribution[1]), float(distribution[2]))
                elif distribution[0] == 'integer':
                    value = random.randint(int(float(distribution[1])), int(float(distribution[2])))  # randint needs integer bounds
                elif distribution[0] == 'beta':
                    value = random.betavariate(float(distribution[1]), float(distribution[2]))
                else:
                    # this is a constant, just use mode
                    value = float(param['mode'])
                parvalues.append(value)
                param_sheet = ss.Sheets(param['sheet'])
                set_param_values(param, param_sheet, value)
            predvalues = []
            for pred, predname in zip(predictions, prednames):
                shOut = ss.Sheets(pred['sheet'])
                value = shOut.Cells(pred['row'], pred['col']).Value
                predvalues.append(value)
                for paramvalue, parname in zip(parvalues, parnames):
                    long_writer.writerow([parname, paramvalue, predname, value])
            wide_writer.writerow(parvalues + predvalues)
Example no. 21
def validate_random_sampling(n,q1q2list,m):
    '''Create a table of random sampling vs systematic sampling for
    given q1, q2, m for n = 1 up to maxn.
    Random sampling uses different schemes for choosing the Erdos-Renyi edge probability.
    '''
    writer = csv.writer(open('results_validate/validate_m{}.csv'.format(m), 'wb'))
    print '[n, q1, q2, truep, randomp_50, randomp_beta1515, random_uniform]'
    writer.writerow(['n', 'q1', 'q2', 'truep', 'randomp_50', 'randomp_beta1515', 'random_uniform'])
    for q1,q2 in q1q2list:
        truep = goodfraction_systematic(n,q1,q2)[0]
        randomp_50 = goodfraction_sample(n,q1,q2,m,lambda: 0.5)[0]
        randomp_beta1515 = goodfraction_sample(n,q1,q2,m,lambda: random.betavariate(1.5,1.5))[0]
        random_uniform = goodfraction_sample(n,q1,q2,m,lambda: random.betavariate(1,1))[0]

        print [n, q1, q2, truep, randomp_50, randomp_beta1515, random_uniform]
        writer.writerow([n, q1, q2, truep, randomp_50, randomp_beta1515, random_uniform])
Example no. 22
    def select_arm(self, p):

        for arm in range(self.n_arms):
            self.values[arm] = random.betavariate(alpha=(np.sum(self.successes[arm, p-1:p+1])+1),
                                                  beta=(np.sum(self.fails[arm, p-1:p+1])+1))

        return np.argmax(self.values[:, p])
Example no. 23
def chooser( results ):
    l = len(results)
    ramp = [1 - 1.0*i/l for i in (range(0,l))]
    printlist(ramp)
    framp = [1/(1.0+choices.get(int(x),0)) for x in results]
    printlist(framp)
    cramp = [ramp[i] * framp[i] for i in range(0,l)]
    printlist(cramp)
    cramp[int((l-1)*random.betavariate(1,3))] *= 100
    cramp[int((l-1)*random.betavariate(1,3))] *= 100
    cramp[int((l-1)*random.betavariate(1,3))] *= 100
    myi = cramp.index(max(cramp))
    choice = int(results[myi])
    choices[choice] = choices.get(choice,0) + 1
    print "Choice:%d Myi:%d" % (choice, myi)
    return choice
Example no. 24
def random_beta(a = 5, b = 5):
    """
    Computes beta distributed value between 0 and 1 with given alpha and beta
    """ 
    
    x = random.betavariate(a, b)
    return x
Example no. 25
File: dpgmm.py Project: hjanime/CSI
  def sampleMixture(self):
    """Once solve has been called and a distribution over models determined
    this allows you to draw a specific model. Returns a 2-tuple, where the
    first entry is an array of weights and the second entry a list of Gaussian
    distributions - they line up, to give a specific Gaussian mixture model. For
    density estimation the probability of a specific point is then the sum of each
    weight multiplied by the probability of it coming from the associated Gaussian.
    For clustering the probability of a specific point belonging to a cluster is
    the weight multiplied by the probability of it coming from a specific Gaussian,
    normalised for all clusters. Note that this includes an additional term to cover
    the infinite number of terms that follow, which is really an approximation, but
    tends to be such a small amount as to not matter. Be warned that if doing clustering
    a point could be assigned to this 'null cluster', indicating that the model thinks the
    point belongs to an unknown cluster (i.e. one that it doesn't have enough information,
    or possibly sticks, to instantiate)."""
    weight = numpy.empty(self.stickCap+1, dtype=numpy.float64)
    stick = 1.0
    for i in xrange(self.stickCap):
      val = random.betavariate(self.v[i,0], self.v[i,1])
      weight[i] = stick * val
      stick *= 1.0 - val
    weight[-1] = stick

    gauss = map(lambda x: x.sample(), self.n)
    gauss.append(self.prior.sample())

    return (weight,gauss)
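The weight loop in sampleMixture is a textbook stick-breaking construction. A standalone Python 3 sketch of just that step, with Beta(1, alpha) draws standing in for the fitted posterior parameters v[i,0], v[i,1]:

import random

def stick_breaking_weights(alpha, stick_cap):
    # Break a unit stick: each draw claims a Beta(1, alpha) fraction of what remains.
    weights, stick = [], 1.0
    for _ in range(stick_cap):
        frac = random.betavariate(1.0, alpha)
        weights.append(stick * frac)
        stick *= 1.0 - frac
    weights.append(stick)  # leftover mass stands in for the infinite tail
    return weights

print(sum(stick_breaking_weights(2.0, 10)))  # 1.0 up to float rounding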
Example no. 26
def random_beta_int(x1 = -1, x2 = 1, a = 5, b = 5):
    """
    Computes beta distributed value between x1 and x2 with given alpha and beta
    """ 
    
    x = x1 + random.betavariate(a, b) * (x2 - x1)
    return x 
Example no. 27
    def select_arm(self, t):

        for arm in range(len(self.successes)):
            self.values[arm] = random.betavariate(
                alpha=(self.successes[arm] + 1),
                beta=(self.fails[arm]+1))

        return np.argmax(self.values)
Example no. 28
def query(_title):
    time.sleep(betavariate(2, 2)/2)  # to prevent overrequesting Google's server
    q.set_phrase(_title)
    querier.send_query(q)
    q_title = querier.articles[0].attrs['title'][0]
    q_num_cit = querier.articles[0].attrs['num_citations'][0]
    print((q_title, q_num_cit))
    return (q_title, q_num_cit)
Example no. 29
def main(pingpong_mike, pingpong_dean, pool_dean, pool_mike):

    # trick: pseudocount of 1 is a uniform beta prior
    pseudocount = 1.0

    total = 0
    n_tries = 100000
    for i in range(n_tries):
        pingpong_bias = random.betavariate(pingpong_mike + pseudocount,
                                           pingpong_dean + pseudocount)
        pool_bias = random.betavariate(pool_dean + pseudocount,
                                       pool_mike + pseudocount)
        if pingpong_bias > pool_bias:
            total += 1

    p = float(total) / n_tries
    print 'Probability that Mike is better: %.2f' % p
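A hedged usage sketch (the game counts are invented): if Mike won 9 of 12 ping-pong games while Dean won 7 of 10 pool games, the Monte Carlo above compares the two Beta posteriors draw by draw.

main(pingpong_mike=9, pingpong_dean=3, pool_dean=7, pool_mike=3)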
Example no. 30
 def _sample_onlist_position(self, confusion_des, length):
     alpha = confusion_des["onlist_fraction_alpha"]
     beta = confusion_des["onlist_fraction_beta"]
     x = random.betavariate(alpha, beta)
     position = int((length - 1) * x) + 1
     if position == length:
         position -= 1
     return position
Example no. 31
			# generate strata dimensions
			N = randint(5,21)							#number of strata
			Ni = [randint(10,201) for i in range(N)]	#strata sizes
			m = mperN * N								#sample budget
			while (sum(Ni)<m):							#regenerate while sample budget is too large
				N = randint(5,21)
				Ni = [randint(10,201) for i in range(N)]
				m = mperN * N

			#generate population data values for the strata
			vals = []
			for i in range(N):
				alpha = random()*4
				beta = random()*4
				vals.append([betavariate(alpha,beta) for ii in range(Ni[i])])

			#calculate actual population mean
			collected_vals = sum(vals,[])
			mean = sum(collected_vals)*1.0/len(collected_vals)

			#calculate error in using SEBM* method
			cvals = copy(vals)
			sampling_errors[0].append(abs(mean-sebm_ideal(cvals,m,d)))

			#calculate error in using SEBM method
			cvals = copy(vals)
			sampling_errors[1].append(abs(mean-sebm(cvals,m,d)))

			#calculate error in using SEBM method with replacement
			cvals = copy(vals)
Example no. 32
def test_sequence(engine_api, sequence_test_collection):
    """
    Simulates a student doing questions in the collection returned by sequence_test_collection fixture
    :param engine_api: fixture returning api client
    :param sequence_test_collection: fixture returning collection
    :return:
    """
    collection = sequence_test_collection
    activities = collection.activity_set.all()
    collection_id = collection.collection_id
    LEARNER = dict(user_id='my_user_id', tool_consumer_instance_guid='default')

    random.seed(1)
    sequence = []
    for i in range(len(activities)):
        # recommend activity to learner
        r = engine_api.recommend(
            learner=LEARNER,
            collection=collection_id,
            sequence=sequence,
        )
        assert r.ok
        sleep(
            0.1
        )  # pytest-django local live server doesn't like requests too close to each other
        print("Engine recommendation response: {}".format(r.json()))
        recommended_activity = r.json()['source_launch_url']

        activity = Activity.objects.get(url=recommended_activity)

        # simulate student response
        sequence_item = {
            'activity': recommended_activity,
            'score': random.betavariate(i + 1,
                                        len(activities) - i + 1),
            'is_problem': True if activity.type == 'problem' else False,
        }
        sequence.append(sequence_item)
        if sequence_item['is_problem']:
            # submit score to api
            r = engine_api.submit_score(learner=LEARNER,
                                        activity=sequence_item['activity'],
                                        score=sequence_item['score'])
            assert r.ok
            sleep(0.1)

            learner = Learner.objects.get(**LEARNER)
            mastery = Mastery.objects.filter(learner=learner).values_list(
                'value', flat=True)
            # test that learner mastery values are between epsilon and (1-epsilon)
            assert all([EPSILON <= x <= (1 - EPSILON) for x in mastery])
    print("Final sequence:")
    for a in sequence:
        print(a)

    # test grade after sequence
    data = {'learner': LEARNER}
    r = engine_api.request(
        'POST',
        f'collection/{sequence_test_collection.collection_id}/grade',
        json=data)
    print(f'grade after sequence: {r.json()}')
    assert r.ok
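The simulated score random.betavariate(i + 1, len(activities) - i + 1) has mean (i + 1) / (len(activities) + 2), so the fake learner improves steadily over the sequence. A quick standalone illustration (10 activities assumed):

import random

random.seed(1)
n_activities = 10  # assumed count, for illustration only
for i in range(n_activities):
    expected = (i + 1) / (n_activities + 2)
    draw = random.betavariate(i + 1, n_activities - i + 1)
    print("step {}: expected mean {:.2f}, sampled score {:.2f}".format(i, expected, draw))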
Example no. 33
 def get_ko_cutoff(self):
     y = -1
     while y <= self.damage:
         y = floor((self.death_threshold + 10) * betavariate(7, 4))
     return y
Example no. 34
    def transform(self, X, y):
        """Resample the dataset.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Matrix containing the data which have to be sampled.

        y : ndarray, shape (n_samples, )
            Corresponding label for each sample in X.

        Returns
        -------
        X_resampled : ndarray, shape (n_samples_new, n_features)
            The array containing the resampled data.

        y_resampled : ndarray, shape (n_samples_new)
            The corresponding label of `X_resampled`

        """
        # Check the consistency of X and y
        X, y = check_X_y(X, y)

        # Call the parent function
        super(SMOTE, self).transform(X, y)

        # Define the number of sample to create
        # We handle only two classes problem for the moment.
        if self.ratio_ == 'auto':
            num_samples = (self.stats_c_[self.maj_c_] -
                           self.stats_c_[self.min_c_])
        else:
            num_samples = ((self.ratio_ * self.stats_c_[self.maj_c_]) -
                           self.stats_c_[self.min_c_])

        # Start by separating minority class features and target values.
        X_min = X[y == self.min_c_]

        # If regular SMOTE is to be performed
        if self.kind == 'regular':

            # Print if verbose is true
            if self.verbose:
                print('Finding the {} nearest neighbours...'.format(self.k))

            # Look for k-th nearest neighbours, excluding, of course, the
            # point itself.
            self.nearest_neighbour_.fit(X_min)

            # Matrix with k-th nearest neighbours indexes for each minority
            # element.
            nns = self.nearest_neighbour_.kneighbors(X_min,
                                                     return_distance=False)[:,
                                                                            1:]

            # Print status if verbose is true
            if self.verbose:
                print("done!")
                print("Creating synthetic samples...", end="")

            # --- Generating synthetic samples
            # Use static method make_samples to generate minority samples
            X_new, y_new = self._make_samples(X_min, self.min_c_, X_min, nns,
                                              num_samples, 1.0)

            if self.verbose:
                print("done!")

            # Concatenate the newly generated samples to the original data set
            X_resampled = np.concatenate((X, X_new), axis=0)
            y_resampled = np.concatenate((y, y_new), axis=0)

            return X_resampled, y_resampled

        if self.kind == 'borderline1' or self.kind == 'borderline2':

            if self.verbose:
                print("Finding the {} nearest neighbours...".format(self.m))

            # Find the NNs for all samples in the data set.
            self.nearest_neighbour_.fit(X)

            if self.verbose:
                print("done!")

            # Boolean array with True for minority samples in danger
            danger_index = self._in_danger_noise(X_min, y, kind='danger')

            # If all minority samples are safe, return the original data set.
            if not any(danger_index):
                if self.verbose:
                    print('There are no samples in danger. No borderline '
                          'synthetic samples created.')

                # All are safe, nothing to be done here.
                return X, y

            # If we got here is because some samples are in danger, we need to
            # find the NNs among the minority class to create the new synthetic
            # samples.
            #
            # We start by changing the number of NNs to consider from m + 1
            # to k + 1
            self.nearest_neighbour_.set_params(**{'n_neighbors': self.k + 1})
            self.nearest_neighbour_.fit(X_min)

            # Indexes of the k NNs of each minority sample in danger
            nns = self.nearest_neighbour_.kneighbors(X_min[danger_index],
                                                     return_distance=False)[:,
                                                                            1:]

            # B1 and B2 types diverge here!!!
            if self.kind == 'borderline1':
                # Create synthetic samples for borderline points.
                X_new, y_new = self._make_samples(X_min[danger_index],
                                                  self.min_c_, X_min, nns,
                                                  num_samples)

                # Concatenate the newly generated samples to the original
                # dataset
                X_resampled = np.concatenate((X, X_new), axis=0)
                y_resampled = np.concatenate((y, y_new), axis=0)

                # Reset the k-neighbours to m+1 neighbours
                self.nearest_neighbour_.set_params(
                    **{'n_neighbors': self.m + 1})

                return X_resampled, y_resampled

            else:
                # Split the number of synthetic samples between only minority
                # (type 1), or minority and majority (with reduced step size)
                # (type 2).
                np.random.seed(self.rs_)

                # The fraction is sampled from a beta distribution centered
                # around 0.5 with variance ~0.01
                fractions = betavariate(alpha=10, beta=10)

                # Only minority
                X_new_1, y_new_1 = self._make_samples(X_min[danger_index],
                                                      self.min_c_,
                                                      X_min,
                                                      nns,
                                                      int(fractions *
                                                          (num_samples + 1)),
                                                      step_size=1.)

                # Only majority with smaller step size
                X_new_2, y_new_2 = self._make_samples(X_min[danger_index],
                                                      self.min_c_,
                                                      X[y != self.min_c_],
                                                      nns,
                                                      int((1 - fractions) *
                                                          num_samples),
                                                      step_size=0.5)

                # Concatenate the newly generated samples to the original
                # data set
                X_resampled = np.concatenate((X, X_new_1, X_new_2), axis=0)
                y_resampled = np.concatenate((y, y_new_1, y_new_2), axis=0)

                # Reset the k-neighbours to m+1 neighbours
                self.nearest_neighbour_.set_params(
                    **{'n_neighbors': self.m + 1})

                return X_resampled, y_resampled

        if self.kind == 'svm':
            # The SVM smote model fits a support vector machine
            # classifier to the data and uses the support vector to
            # provide a notion of boundary. Unlike regular smote, where
            # such notion relies on proportion of nearest neighbours
            # belonging to each class.

            # Fit SVM to the full data
            self.svm_.fit(X, y)

            # Find the support vectors and their corresponding indexes
            support_index = self.svm_.support_[y[self.svm_.support_] ==
                                               self.min_c_]
            support_vector = X[support_index]

            # First, find the nn of all the samples to identify samples
            # in danger and noisy ones
            if self.verbose:
                print("Finding the {} nearest neighbours...".format(self.m))

            # As usual, fit a nearest neighbour model to the data
            self.nearest_neighbour_.fit(X)

            if self.verbose:
                print("done!")

            # Now, get rid of noisy support vectors

            noise_bool = self._in_danger_noise(support_vector, y, kind='noise')

            # Remove noisy support vectors
            support_vector = support_vector[np.logical_not(noise_bool)]
            danger_bool = self._in_danger_noise(support_vector,
                                                y,
                                                kind='danger')
            safety_bool = np.logical_not(danger_bool)

            if self.verbose:
                print("Out of {0} support vectors, {1} are noisy, "
                      "{2} are in danger "
                      "and {3} are safe.".format(
                          support_vector.shape[0],
                          noise_bool.sum().astype(int),
                          danger_bool.sum().astype(int),
                          safety_bool.sum().astype(int)))

                # Proceed to find support vectors NNs among the minority class
                print("Finding the {} nearest neighbours...".format(self.k))

            self.nearest_neighbour_.set_params(**{'n_neighbors': self.k + 1})
            self.nearest_neighbour_.fit(X_min)

            if self.verbose:
                print("done!")
                print("Creating synthetic samples...", end="")

            # Split the number of synthetic samples between interpolation and
            # extrapolation

            # The fractions are sampled from a beta distribution with mean
            # 0.5 and variance ~0.01
            np.random.seed(self.rs_)
            fractions = betavariate(alpha=10, beta=10)

            # Interpolate samples in danger
            if np.count_nonzero(danger_bool) > 0:
                nns = self.nearest_neighbour_.kneighbors(
                    support_vector[danger_bool], return_distance=False)[:, 1:]

                X_new_1, y_new_1 = self._make_samples(
                    support_vector[danger_bool],
                    self.min_c_,
                    X_min,
                    nns,
                    int(fractions * (num_samples + 1)),
                    step_size=1.)

            # Extrapolate safe samples
            if np.count_nonzero(safety_bool) > 0:
                nns = self.nearest_neighbour_.kneighbors(
                    support_vector[safety_bool], return_distance=False)[:, 1:]

                X_new_2, y_new_2 = self._make_samples(
                    support_vector[safety_bool],
                    self.min_c_,
                    X_min,
                    nns,
                    int((1 - fractions) * num_samples),
                    step_size=-self.out_step)

            if self.verbose:
                print("done!")

            # Concatenate the newly generated samples to the original data set
            if (np.count_nonzero(danger_bool) > 0
                    and np.count_nonzero(safety_bool) > 0):
                X_resampled = np.concatenate((X, X_new_1, X_new_2), axis=0)
                y_resampled = np.concatenate((y, y_new_1, y_new_2), axis=0)
            # not any support vectors in danger
            elif np.count_nonzero(danger_bool) == 0:
                X_resampled = np.concatenate((X, X_new_2), axis=0)
                y_resampled = np.concatenate((y, y_new_2), axis=0)
            # All the support vector in danger
            elif np.count_nonzero(safety_bool) == 0:
                X_resampled = np.concatenate((X, X_new_1), axis=0)
                y_resampled = np.concatenate((y, y_new_1), axis=0)

            # Reset the k-neighbours to m+1 neighbours
            self.nearest_neighbour_.set_params(**{'n_neighbors': self.m + 1})

            return X_resampled, y_resampled
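The Beta(10, 10) split used in the borderline2 and svm branches has mean 0.5 and variance 100 / (400 * 21) ≈ 0.0119, matching the comments above. A quick empirical check:

import random

draws = [random.betavariate(10, 10) for _ in range(100000)]
mean = sum(draws) / len(draws)
var = sum((d - mean) ** 2 for d in draws) / len(draws)
print(round(mean, 3), round(var, 4))  # ≈ 0.5 and ≈ 0.0119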
Example no. 35
def thompson_sampling(scores, percentage_scores):
    N = 10000  # total number of rounds (customers connecting to website)
    d = 8  # number of strategies

    # Creating Simulation

    conversion_rates = [element[1] for element in percentage_scores
                        ]  # get only the values
    X = np.array(np.zeros([N, d]))  # create zeros array

    for i in range(N):
        for j in range(d):  # Bernoulli distribution
            if np.random.rand() <= conversion_rates[j]:
                X[i, j] = 1

    # Implementing Random Strategy vs Thompson Sampling

    strategies_selected_rs = []
    strategies_selected_ts = []
    total_reward_rs = 0
    total_reward_ts = 0
    numbers_of_rewards_1 = [0] * d
    numbers_of_rewards_0 = [0] * d

    for n in range(0, N):  # for each round
        # Random Strategy
        strategy_rs = random.randrange(d)  # select random 0-8 strategy
        strategies_selected_rs.append(
            strategy_rs)  # append to list of random strategies
        reward_rs = X[
            n,
            strategy_rs]  # compare selected action with "real life simulation" X and get assigned reward
        total_reward_rs += reward_rs  # get total reward

        # Thompson Sampling
        strategy_ts = 0
        max_random = 0
        for i in range(0, d):  # for each strategy
            # draw from this strategy's Beta posterior, built from the 1s and 0s it has received so far
            random_beta = random.betavariate(numbers_of_rewards_1[i] + 1,
                                             numbers_of_rewards_0[i] + 1)
            # update random beta for each strategy
            if random_beta > max_random:
                max_random = random_beta
                strategy_ts = i

        reward_ts = X[
            n,
            strategy_ts]  # compare selected action with "real life simulation" X and get assigned reward
        # update number of rewards
        if reward_ts == 1:
            numbers_of_rewards_1[strategy_ts] += 1
        else:
            numbers_of_rewards_0[strategy_ts] += 1
        # append to list of ts strategies
        strategies_selected_ts.append(strategy_ts)
        # accumulate total ts rewards
        total_reward_ts += reward_ts

    # For Histograms
    thompson_counter = Counter(strategies_selected_ts)
    thompson_strategies = dict(thompson_counter)
    top_ts = max(thompson_strategies.items(), key=operator.itemgetter(1))[0]
    top_ts_count = thompson_strategies.get(top_ts)

    random_counter = Counter(strategies_selected_rs)
    random_strategies = dict(random_counter)
    top_rs_count = random_strategies.get(top_ts)

    # Replace id's with ad names

    scores_list = []
    for key, value in scores.items():
        temp = [key, value]
        scores_list.append(temp)

    random_list = []
    for key, value in random_strategies.items():
        temp = [key, value]
        random_list.append(temp)

    thompson_list = []
    for key, value in thompson_strategies.items():
        temp = [key, value]
        thompson_list.append(temp)

    random_list.sort(key=lambda x: x[0])
    thompson_list.sort(key=lambda x: x[0])

    random_list = [a + b for a, b in zip(scores_list, random_list)]
    random_list.sort(key=lambda x: x[3], reverse=True)
    thompson_list = [a + b for a, b in zip(scores_list, thompson_list)]
    thompson_list.sort(key=lambda x: x[3], reverse=True)

    top_score = thompson_list[0][0]

    for element in thompson_list:
        element[0] = element[0].replace('_', ', ')
        element[0] = element[0].replace('female', 'girl')
        element[0] = element[0].title()

    for element in random_list:
        element[0] = element[0].replace('_', ', ')
        element[0] = element[0].replace('female', 'girl')
        element[0] = element[0].title()

    # Compute the Absolute and Relative Return

    absolute_return = int((total_reward_ts - total_reward_rs) *
                          1000)  # each customer conversion = 1000 USD
    relative_return = int(
        (total_reward_ts - total_reward_rs) / total_reward_rs * 100)

    algorithm_results = {
        'conversion_rates': conversion_rates,
        'absolute_return': absolute_return,
        'relative_return': relative_return,
        'thompson_list': thompson_list,
        'random_list': random_list,
        'top_ts_count': top_ts_count,
        'top_rs_count': top_rs_count,
        'scores_list': scores_list,
        "top_score": top_score
    }

    return algorithm_results
Example no. 36
# Implementing Thompson Sampling
import random

N = 10000  # users
d = 10  # number of ads
ads_selected = []
numbers_of_rewards_1 = [0] * d  # number of 1 rewards for each ad
numbers_of_rewards_0 = [0] * d  # number of 0 rewards for each ad
total_reward = 0
for n in range(0, N):
    ad = 0
    max_random = 0  # maximum random draw
    for i in range(0, d):
        random_beta = random.betavariate(
            numbers_of_rewards_1[i] + 1, numbers_of_rewards_0[i] +
            1)  # draw a random value following Thompson Sampling's betavariate
        if random_beta > max_random:  # if this random_beta beats the others, select it
            max_random = random_beta
            ad = i
    ads_selected.append(ad)
    reward = dataset.values[n, ad]

    if reward == 1:
        numbers_of_rewards_1[ad] = numbers_of_rewards_1[ad] + 1
    else:
        numbers_of_rewards_0[ad] = numbers_of_rewards_0[ad] + 1
    total_reward = total_reward + reward

# Visualising the results - Histogram
plt.hist(ads_selected)
Example no. 37
 def test_betavariate_return_zero(self, gammavariate_mock):
     # betavariate() returns zero when the Gamma distribution
     # that it uses internally returns this same value.
     gammavariate_mock.return_value = 0.0
     self.assertEqual(0.0, random.betavariate(2.71828, 3.14159))
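Context for the mocked test above: CPython builds betavariate() from two gammavariate() draws, returning 0.0 when the first draw is zero. A sketch of that relationship (it mirrors the stdlib approach, not the exact source):

import random

def beta_via_gamma(alpha, beta):
    # Beta(alpha, beta) as X / (X + Y) with X ~ Gamma(alpha, 1), Y ~ Gamma(beta, 1).
    x = random.gammavariate(alpha, 1.0)
    if x == 0.0:
        return 0.0  # the degenerate case the test above forces via the mock
    return x / (x + random.gammavariate(beta, 1.0))

random.seed(0)
print(beta_via_gamma(2.71828, 3.14159))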
Example no. 38
 def Random(self):
     """Generates a random variate from this distribution."""
     return random.betavariate(self.alpha, self.beta)
Example no. 39
import random
random.seed(3)
# call the function here
print(random.betavariate(0.9, 0.1))

Example no. 40
# Importing the dataset
ds = pd.read_csv("Ads_CTR_Optimisation.csv")

# Implementing Thompson Sampling
N = 10000  # Number of rows in dataset
d = 10  # Number of ads
adSelected = []
numberOfRewards_0 = [0] * d
numberOfRewards_1 = [0] * d
totalReward = 0
for n in range(0, N):
    ad = 0
    maxRandom = 0
    for i in range(0, d):
        randomBeta = random.betavariate(numberOfRewards_1[i] + 1,
                                        numberOfRewards_0[i] + 1)

        if randomBeta > maxRandom:
            maxRandom = randomBeta
            ad = i  # Index
    adSelected.append(ad)
    reward = ds.values[n, ad]
    if reward == 1:
        numberOfRewards_1[ad] = numberOfRewards_1[ad] + 1
    else:
        numberOfRewards_0[ad] = numberOfRewards_0[ad] + 1
    totalReward = totalReward + reward

# Visualising the results - Histogram
plt.hist(adSelected)
plt.title('Histogram of ads selections')
Example no. 41
# importing the dataset
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')

# apply the Thompson sampling model
import random
N = 10000
d = 10
ads_selected = []
total_reward = 0
no_of_1 = [0] * d
no_of_0 = [0] * d
for n in range(0, N):
    max_random = ad = 0
    for i in range(0, d):
        random_draw = random.betavariate(no_of_1[i] + 1, no_of_0[i] + 1)
        if random_draw > max_random:
            max_random = random_draw
            ad = i
    ads_selected.append(ad)
    reward = dataset.values[n, ad]
    total_reward = total_reward + reward
    if reward == 1:
        no_of_1[ad] = no_of_1[ad] + 1
    else:
        no_of_0[ad] = no_of_0[ad] + 1

# plotting a histogram of the ads selected
plt.hist(ads_selected)
plt.title('Histogram of Thompson sampling ad selections')
plt.xlabel('Ad')
plt.ylabel('Number of times each ad was selected')
plt.show()

Example no. 42
# Thompson sampling via posterior Beta draws
import random

N = 10000
d = 10
total_rewards = np.zeros(d)
total_chosen = np.zeros(d)
for n in range(0, N):  # N

    # Draw from our current distributions, which come from Bayesian inference
    random_betas = [
        random.betavariate(total_rewards[i] + 1,
                           total_chosen[i] - total_rewards[i] + 1)
        for i in range(d)
    ]

    # select an ad
    ad = np.argmax(random_betas)

    # update the observations with the currently selected add
    total_rewards[ad] += dataset.iloc[n, ad]
    total_chosen[ad] += 1

# step 2, profit
grandtotal_reward = np.sum(total_rewards)

# Visualising the results
plt.bar(range(len(total_chosen)), total_chosen)
Example no. 43
    def train_one_epoch(self, args, epoch, warmup_epochs=5, warmup_lr=0):
        self.net.train()
        self.run_config.train_loader.sampler.set_epoch(
            epoch)  # required by distributed sampler
        MyRandomResizedCrop.EPOCH = epoch  # required by elastic resolution

        nBatch = len(self.run_config.train_loader)

        losses = DistributedMetric('train_loss')
        metric_dict = self.get_metric_dict()
        data_time = AverageMeter()

        with tqdm(total=nBatch,
                  desc='Train Epoch #{}'.format(epoch + 1),
                  disable=not self.is_root) as t:
            end = time.time()
            for i, (images, labels) in enumerate(self.run_config.train_loader):
                MyRandomResizedCrop.BATCH = i
                data_time.update(time.time() - end)
                if epoch < warmup_epochs:
                    new_lr = self.run_config.warmup_adjust_learning_rate(
                        self.optimizer,
                        warmup_epochs * nBatch,
                        nBatch,
                        epoch,
                        i,
                        warmup_lr,
                    )
                else:
                    new_lr = self.run_config.adjust_learning_rate(
                        self.optimizer, epoch - warmup_epochs, i, nBatch)

                images, labels = images.cuda(), labels.cuda()
                target = labels
                if isinstance(self.run_config.mixup_alpha, float):
                    # transform data
                    random.seed(int('%d%.3d' % (i, epoch)))
                    lam = random.betavariate(self.run_config.mixup_alpha,
                                             self.run_config.mixup_alpha)
                    images = mix_images(images, lam)
                    labels = mix_labels(
                        labels, lam, self.run_config.data_provider.n_classes,
                        self.run_config.label_smoothing)

                # soft target
                if args.teacher_model is not None:
                    args.teacher_model.train()
                    with torch.no_grad():
                        soft_logits = args.teacher_model(images).detach()
                        soft_label = F.softmax(soft_logits, dim=1)

                # compute output
                output = self.net(images)

                if args.teacher_model is None:
                    loss = self.train_criterion(output, labels)
                    loss_type = 'ce'
                else:
                    if args.kd_type == 'ce':
                        kd_loss = cross_entropy_loss_with_soft_target(
                            output, soft_label)
                    else:
                        kd_loss = F.mse_loss(output, soft_logits)
                    loss = args.kd_ratio * kd_loss + self.train_criterion(
                        output, labels)
                    loss_type = '%.1fkd+ce' % args.kd_ratio

                # update
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # measure accuracy and record loss
                losses.update(loss, images.size(0))
                self.update_metric(metric_dict, output, target)

                t.set_postfix({
                    'loss':
                    losses.avg.item(),
                    **self.get_metric_vals(metric_dict, return_dict=True),
                    'img_size':
                    images.size(2),
                    'lr':
                    new_lr,
                    'loss_type':
                    loss_type,
                    'data_time':
                    data_time.avg,
                })
                t.update(1)
                end = time.time()

        return losses.avg.item(), self.get_metric_vals(metric_dict)
Example no. 44
def randomConductace():
    #return random.uniform(0.01,1.0)
    return (round(random.betavariate(5, 80), 2) + 0.005) % 1
Example no. 45
def betafunc(a, b):
    ''' return appropriate function from random class '''
    return lambda: random.betavariate(float(a), float(b))
Example no. 46
def BETA(index, alpha: float = 1.0, beta: float = 1.0):
    return random.betavariate(alpha, beta)
Example no. 47
kappa = 1.5  # concentration, must be >= 0
print "\n(Von Mises) Angular(mu=%d, kappa=%d)" % (mu, kappa)
angular = []
for i in xrange(20):
    angular.append(c.create_decimal(random.vonmisesvariate(mu, kappa)))
angular = sorted(angular)
for a in angular:
    print "%02.1d" % a,

# Beta distribution
alpha = 1
beta = 2
print "\nBeta(alpha=%d, beta=%d)" % (alpha, beta)
beta_v = []
for i in xrange(20):
    beta_v.append(random.betavariate(alpha, beta))
beta_v = sorted(beta_v)
for b in beta_v:
    print c.create_decimal(b),

# Gamma distribution
print "\nGamma(alpha=%d, beta=%d)" % (alpha, beta)
gamma = []
for i in xrange(20):
    gamma.append(random.gammavariate(alpha, beta))
gamma = sorted(gamma)
for g in gamma:
    print c.create_decimal(g),

# Weibull distribution
print "\nWeibull(alpha=%d, beta=%d)" % (alpha, beta)
Example no. 48
def give_timing(sess,
                overlap_time_ratio=0.3,
                sil_prob=0.2,
                sil_dur=[0.3, 2.0],
                allow_3fold_overlap=False):
    time_marked_sess = copy.deepcopy(sess)

    # Calculate the total length and derive the overlap time budget.
    total_len = np.sum(
        np.array([utt['length_in_seconds'] for utt in time_marked_sess]))
    total_overlap_time = total_len * overlap_time_ratio / (1 +
                                                           overlap_time_ratio)

    # Determine where to do overlap.
    nutts = len(time_marked_sess)
    to_overlap = bernoulli.rvs(1 - sil_prob,
                               size=nutts - 1).astype(bool).tolist()
    noverlaps = sum(to_overlap)

    # Distribute the budget to each utterance boundary with the "stick breaking" approach.
    probs = []
    rem = 1
    for i in range(noverlaps - 1):
        p = random.betavariate(1, 5)
        probs.append(rem * p)
        rem *= (1 - p)
    probs.append(rem)
    random.shuffle(probs)

    idx = -1
    overlap_times = [0.0]
    for b in to_overlap:
        if b:
            idx += 1
            overlap_times.append(probs[idx] * total_overlap_time)
        else:
            overlap_times.append(
                -np.random.uniform(low=sil_dur[0], high=sil_dur[1]))

    # Get all speakers.
    speakers = set(utt['speaker_id'] for utt in time_marked_sess)

    # Determine the offset values while ensuring that there is no overlap between multiple utterances spoken by the same person.
    offset = 0
    last_utt_end = {spkr: 0.0 for spkr in speakers}
    last_utt_end_times = sorted(list(last_utt_end.values()),
                                reverse=True)  # all zero (of course!)
    actual_overlap_time = 0
    for utt, ot in zip(time_marked_sess, overlap_times):
        spkr = utt['speaker_id']

        if len(last_utt_end_times) > 1 and (not allow_3fold_overlap):
            # second term for ensuring same speaker's utterances do not overlap.
            # third term for ensuring the maximum number of overlaps is two.
            ot = min(ot, offset - last_utt_end[spkr],
                     offset - last_utt_end_times[1])
        else:
            ot = min(ot, offset - last_utt_end[spkr])

        offset -= ot
        actual_overlap_time += max(ot, 0)
        utt['offset'] = offset
        offset += utt['length_in_seconds']

        last_utt_end[spkr] = offset

        last_utt_end_times = sorted(list(last_utt_end.values()), reverse=True)
        offset = last_utt_end_times[0]

    actual_overlap_time_ratio = actual_overlap_time / (total_len -
                                                       actual_overlap_time)

    attr = {
        'target_overlap_time_ratio': overlap_time_ratio,
        'actual_overlap_time_ratio': actual_overlap_time_ratio
    }

    return time_marked_sess, attr
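
# A minimal usage sketch for give_timing, assuming each utterance dict
# carries 'speaker_id' and 'length_in_seconds' (values are illustrative):
sess = [
    {'speaker_id': 'A', 'length_in_seconds': 3.0},
    {'speaker_id': 'B', 'length_in_seconds': 2.5},
    {'speaker_id': 'A', 'length_in_seconds': 4.0},
    {'speaker_id': 'B', 'length_in_seconds': 1.5},
]
timed, attr = give_timing(sess, overlap_time_ratio=0.3)
for utt in timed:
    print(utt['speaker_id'], round(utt['offset'], 2))
print('actual overlap ratio:', attr['actual_overlap_time_ratio'])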
Example no. 49
import random

def new_thetas(types):
    """
    Draw one Beta(a, b) sample for each (a, b) pair in `types`.
    """
    return [random.betavariate(a, b) for a, b in types]
Example no. 50
import random

def beta(alpha=2, beta=3):
    return random.betavariate(alpha, beta)
Example no. 51
def fuzz_number(number):
    # Scale by 5 * Beta(2, 8); the factor's mean is 1.0 since E[Beta(2, 8)] = 0.2.
    return int(random.betavariate(2, 8) * 5 * number)
Example no. 52
import time
from random import betavariate

def randomsleep(t):
    'Sleep between zero and t seconds, heavily biased toward short sleeps.'
    time.sleep(t * betavariate(0.7, 8))
Example no. 53
    def draw(self):
        return betavariate(self.alpha, self.beta)
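
# The method above is an excerpt; a minimal self-contained version, with the
# enclosing class and attribute names assumed, could look like this:
from random import betavariate

class BetaArm:
    # Hypothetical enclosing class for the draw() excerpt above.
    def __init__(self, alpha=1.0, beta=1.0):
        self.alpha = alpha
        self.beta = beta

    def draw(self):
        return betavariate(self.alpha, self.beta)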
Example no. 54
# Fragment: `x` (and the math/random imports) are defined earlier in the file.
print(math.erfc(x))
print(math.gamma(x))
print(math.lgamma(x))
print(math.pi)
print(math.e)

# Random module
# Note: seed(), setstate(), and shuffle() return None, so those calls print None.

print(random.seed(3))
print(random.getstate())
state = random.getstate()
print(random.setstate(state))
print(random.getrandbits(43))
print(random.randrange(5))
print(random.randrange(3, 7))
print(random.randint(3, 5))
print(random.choice(list(range(10))))
print(random.shuffle(list(range(10))))
print(random.sample(range(10000000), k=60))
print(random.uniform(3, 76))
print(random.triangular(1, 3, 6))
print(random.betavariate(0.3, 0.7))
print(random.expovariate(1.5))
print(random.gammavariate(1.5, 4.5))
print(random.gauss(34, 5))
print(random.lognormvariate(34, 5))
print(random.normalvariate(34, 5))
print(random.vonmisesvariate(34, 5))
print(random.paretovariate(34))
print(random.weibullvariate(34, 5))
Example no. 55
    # Fragment of a rock-paper-scissors bot; `mdl`, `k2i`, and `input` are
    # supplied by the surrounding program.
    mRNA = ""
    flag = [False] * 7
    # Build independent rows: `[[0] * 3] * 4` would alias one row four times.
    hist = [[0] * 3 for _ in range(4)]
    eval = [0] * 3
    subs = [0] * 30
    prin = [[0] * 30 for _ in range(3)]
    meta = [0] * 3
    output = random.choice("RPS")
else:
    for i in range(3):
        j = prin[i].index(max(prin[i]))
        if ((j < 3 and flag[0]) or (3 <= j < 6 and flag[1])
                or (6 <= j < 9 and flag[2]) or (9 <= j < 12 and flag[3])
                or (12 <= j < 18 and flag[4]) or (18 <= j < 24 and flag[5])
                or (j >= 24 and flag[6])):
            meta[i] *= random.betavariate(4.6, 1.4)
            k = mdl(subs[j] - k2i[input])
            if k == 2:
                meta[i] -= 1
            else:
                meta[i] += k
    for j in range(30):
        if ((j < 3 and flag[0]) or (3 <= j < 6 and flag[1])
                or (6 <= j < 9 and flag[2]) or (9 <= j < 12 and flag[3])
                or (12 <= j < 18 and flag[4]) or (18 <= j < 24 and flag[5])
                or (j >= 24 and flag[6])):
            for i in range(1, 3):
                prin[i][j] *= 0.9
            k = mdl(subs[j] - k2i[input])
            if k == 1:
                for i in range(3):
Example no. 56
import random

random.seed(3)

# With a fixed seed, the draw below is reproducible.
print(random.betavariate(0.9, 0.1))
Example no. 57
# Fragment: X (the rewards matrix), N, d, and the random-strategy bookkeeping
# (strategies_selected_rs, total_reward_rs, ...) are defined earlier.
numbers_of_rewards_1 = [0] * d
numbers_of_rewards_0 = [0] * d
best_rewards = [0] * d
regret = []

for n in range(0, N):
    # Random Strategy
    strategy_rs = random.randrange(d)
    strategies_selected_rs.append(strategy_rs)
    reward_rs = X[n, strategy_rs]
    total_reward_rs = total_reward_rs + reward_rs
    # Thompson Sampling
    strategy_ts = 0
    max_random = 0
    for i in range(0, d):
        random_beta = random.betavariate(numbers_of_rewards_1[i] + 1,
                                         numbers_of_rewards_0[i] + 1)
        if random_beta > max_random:
            max_random = random_beta
            strategy_ts = i
        # accumulating best rewards for each round
        best_rewards[i] = best_rewards[i] + X[n, i]

    reward_ts = X[n, strategy_ts]
    if reward_ts == 1:
        numbers_of_rewards_1[strategy_ts] += 1
    else:
        numbers_of_rewards_0[strategy_ts] += 1
    strategies_selected_ts.append(strategy_ts)
    total_reward_ts = total_reward_ts + reward_ts
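
# The `regret` list above is initialized but never filled in this fragment; a
# plausible per-round completion (an assumption, not the source's code) would
# append, at the end of each round:
#     regret.append(max(best_rewards) - total_reward_ts)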
Example no. 58
    def sample(self):
        return {self.value: random.betavariate(self.alpha, self.beta)}
Example no. 59
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random

dataSet = pd.read_csv("Ads_CTR_Optimisation.csv")

N = 10000  # number of rounds (renamed from `n` to avoid shadowing the loop variable)
d = 10     # number of ads
ads_selected = []
reward_1 = [0] * d
reward_0 = [0] * d

for n in range(N):
    ad = 0
    max_beta = 0
    for i in range(0, d):
        random_beta = random.betavariate(reward_1[i] + 1, reward_0[i] + 1)
        if random_beta > max_beta:
            max_beta = random_beta
            ad = i
    ads_selected.append(ad)
    if dataSet.values[n, ad] == 1:
        reward_1[ad] += 1
    else:
        reward_0[ad] += 1

plt.hist(ads_selected)
plt.show()  # display the selection histogram
Example no. 60
# Fragment: `dataset` (rewards per round), N, d, and `import random` are
# assumed from earlier in the file.
number_of_reward0 = [0] * d  # times each entity returned reward 0, up to round n
number_of_reward1 = [0] * d  # times each entity returned reward 1, up to round n

entity_selected = []  # entity chosen at each round
total_reward = 0  # running total over all rounds

for n in range(0, N):  # N is the total number of rounds
    entity = 0
    max_random = 0  # highest posterior draw among the entities so far
    for i in range(0, d):  # d is the number of entities (arms)
        # One random draw from each entity's Beta posterior.
        random_beta = random.betavariate(number_of_reward1[i] + 1,
                                         number_of_reward0[i] + 1)
        # Keep the entity with the highest draw.
        if random_beta > max_random:
            max_random = random_beta
            entity = i
    entity_selected.append(entity)
    reward = dataset[n][entity]
    total_reward = total_reward + reward  # updating total reward
    if reward == 0:
        number_of_reward0[entity] = number_of_reward0[entity] + 1
    else:
        number_of_reward1[entity] = number_of_reward1[entity] + 1

# Visualising the entities and results
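
# A minimal visualisation sketch for the loop above (assumes matplotlib is
# imported as plt; the labels are illustrative):
plt.hist(entity_selected)
plt.title('Entities selected by Thompson Sampling')
plt.xlabel('Entity')
plt.ylabel('Times selected')
plt.show()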