def request(self):
    """Determine the actual ping that will be made by the pinger object.

    A uniform random draw is mapped through the CDF built from
    ``self.E`` to pick the ping index.
    """
    # Uniform draw used to invert the CDF.
    draw = uniform(0, 1)
    # Cumulative distribution over the entries of self.E.
    cumulative = h.cdf(self.E)
    # The CDF interval containing the draw identifies the ping.
    return h.get_index(draw, cumulative)
def next_action(self):
    """Pick the index of the automaton's next action.

    Normalizes the probability vector ``self.p``, builds its CDF, and
    inverts it with a uniform random draw.

    Returns:
        The index of the selected action.
    """
    randy = uniform(0, 1)  # Throwback to Archer.
    # Re-normalize in case self.p has drifted from summing to exactly 1.
    probs = self.p / sum(self.p)
    cdf = h.cdf(probs)
    # Map the uniform draw through the inverse CDF to an action index.
    # (The original dead store `index = 0` was unconditionally
    # overwritten here, so it has been removed along with the
    # commented-out debug prints.)
    return h.get_index(randy, cdf)
def next_action(self):
    """Pick the next action of the learning automaton.

    The action is sampled from the probability vector ``self.p`` of
    the LRI automaton: a uniform random draw is mapped through the
    CDF of ``self.p`` to an action index. At the first time instant
    all action probabilities are equally likely.

    Returns:
        The index of the selected action.
    """
    # Uniform random number to be tested against the CDF of self.p.
    randy = uniform(0, 1)  # Throwback to Archer.
    cdf = h.cdf(self.p)
    # The CDF interval containing the draw is the next action.
    # (Removed the dead store `index = 0` — it was unconditionally
    # overwritten by h.get_index — and the commented-out debug prints.)
    return h.get_index(randy, cdf)
def get_cdf(self):
    """Return a 2-row array: the halving-model expected counts
    stacked over ``helpers.cdf(self.Ncat)``."""
    ranks = np.arange(1, self.Ncat + 1)
    # Nitems/2, Nitems/4, ... for ranks 1..Ncat.
    expected_counts = self.Nitems * np.power(0.5, ranks)
    model_cdf = helpers.cdf(self.Ncat)
    return np.vstack((expected_counts, model_cdf))
from scipy import optimize, stats
import numpy as np

import helpers

# Run once to compile the functions (warm-up calls for the helpers).
print("Compiling the functions...")
print(helpers.one_trial_div2(20, 60))
print(helpers.get_tree_fudge_multi(20, 60, 2))
print(helpers.Ptree(20, 10))
print(helpers.f_empty(20, 10))
print(helpers.n_filled(20, 10))
print(helpers.cdf(20))
print(helpers.get_mc(20, 60, 2, 10))


class Counts:
    """Class that reads a counts filename and holds all the
    operations we want to do.

    Note: the redundant ``(object)`` base was dropped — this is a
    Python 3 file (print is called as a function), where all classes
    are new-style.
    """

    def __init__(self, fname):
        """Read a file of counts and sort them in descending order.

        Args:
            fname: path to a counts file readable by ``np.loadtxt``.
        """
        # Descending sort: the largest count gets rank 1.
        self.counts = np.sort(np.loadtxt(fname))[::-1]
        self.Ncat = len(self.counts)       # number of categories
        self.Nitems = np.sum(self.counts)  # total number of items
        self.frequencies = self.counts / self.Nitems
        self.ranks = np.arange(1, self.Ncat + 1)

    def get_cdf(self):
        """Return a 2-row array: the halving-model expected counts
        (Nitems/2, Nitems/4, ...) stacked over ``helpers.cdf(self.Ncat)``."""
        cdf_counts = self.Nitems * 0.5**np.arange(1, self.Ncat + 1)
        cdf = helpers.cdf(self.Ncat)
        return np.vstack((cdf_counts, cdf))
def get_cdf(self):
    """Stack the halving-model expected counts on top of the helper
    CDF and return the resulting two-row array."""
    exponents = np.arange(1, self.Ncat + 1)
    # Expected counts under repeated halving: Nitems/2, Nitems/4, ...
    halving_counts = self.Nitems * 0.5 ** exponents
    return np.vstack((halving_counts, helpers.cdf(self.Ncat)))
from scipy import optimize, stats
import numpy as np
import helpers

# Run once to compile the functions (warm-up calls for the helpers).
print("Compiling the functions...")
print(helpers.one_trial_div2(20, 60))
print(helpers.get_tree_fudge_multi(20, 60, 2))
print(helpers.Ptree(20, 10))
print(helpers.f_empty(20, 10))
print(helpers.n_filled(20, 10))
print(helpers.cdf(20))
print(helpers.get_mc(20, 60, 2, 10))


class Counts(object):
    """Class that reads a counts filename and holds all the operations we want to do"""

    def __init__(self, fname):
        """Read a file of (sorted) counts"""
        # Descending sort: the largest count gets rank 1.
        self.counts = np.sort(np.loadtxt(fname))[::-1]
        self.Ncat = len(self.counts)  # number of categories
        self.Nitems = np.sum(self.counts)  # total number of items
        self.frequencies = self.counts/self.Nitems
        self.ranks = np.arange(1, self.Ncat+1)

    def get_cdf(self):
        # NOTE(review): this method appears truncated at the chunk
        # boundary — sibling versions of get_cdf end with
        # `return np.vstack((cdf_counts, cdf))`; as written it
        # returns None. Confirm against the full file.
        cdf_counts = self.Nitems * 0.5**np.arange(1, self.Ncat+1)
        cdf = helpers.cdf(self.Ncat)