def observationGenerator(self):
		"""Generate a matrix of Bernoulli reward observations: the first K
		columns use success probability P, the remaining L-K use P - delta.
		"""
		C_first_K = bernoulli.rvs(self.P, size=(self.N+2, self.K))
		C_rest = bernoulli.rvs(self.P-self.delta, size=(self.N+2, self.L-self.K))
		C = np.hstack((C_first_K, C_rest))
		# C = bernoulli.rvs(0.2, size=(N+2,L))
		C = np.asarray(C, dtype='float')
		return C
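A minimal standalone sketch of the same idea, with the attributes passed explicitly (make_observations is a hypothetical name; it assumes P - delta stays in [0, 1]):

import numpy as np
from scipy.stats import bernoulli

def make_observations(P, delta, N, K, L):
    # first K columns succeed with probability P, the rest with P - delta
    first = bernoulli.rvs(P, size=(N + 2, K))
    rest = bernoulli.rvs(P - delta, size=(N + 2, L - K))
    return np.hstack((first, rest)).astype(float)

C = make_observations(0.5, 0.1, N=10, K=3, L=8)  # shape (12, 8)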
def preTrainingParameters(parameters, instances, nodes):
    a = 0.1
    ###convert feature to binary
    for i in range(len(instances)):
        instance = instances[i]
        feature = instance[1:]
        instances[i] = [bernoulli.rvs(1.0/(1 + math.exp(-1 * x)), size=1)[0] for x in feature]

    currentLayerNodes = instances

    for parameter in range(len(parameters)):
        currentParameter = parameters[parameter]
        for instance in range(len(currentLayerNodes)):
            currentLayerNode = currentLayerNodes[instance]
            ### remove y for input layer
            #if (parameter == 0):
            #    currentLayerNode = currentLayerNode[1:]
            ###use visible to get hidden
            nextLayerNodes = [0 for x in range(nodes[parameter])]
            for nextLayerNode in range(len(nextLayerNodes)):
                tmp = 0
                for w in range(len(currentParameter)):
                    tmp = tmp + currentLayerNode[w]*currentParameter[w][nextLayerNode]
                nextLayerNodes[nextLayerNode] = bernoulli.rvs(1.0/(1 + math.exp(-1 * tmp)), size=1)[0]
            ###use hidden to get visible, negative phase
            visibleNodesNegative = [0 for x in range(len(currentParameter))]
            for visibleNodeNegative in range(len(visibleNodesNegative)):
                tmp = 0
                for w in range(len(nextLayerNodes)):
                    tmp = tmp + nextLayerNodes[w]*currentParameter[visibleNodeNegative][w]
                visibleNodesNegative[visibleNodeNegative] = bernoulli.rvs(1.0/(1 + math.exp(-1 * tmp)), size=1)[0]
            ###use negative visible to get negative hidden
            nextLayerNodesNegative = [0 for x in range(nodes[parameter])]
            for nextLayerNodeNegative in range(len(nextLayerNodesNegative)):
                tmp = 0
                for w in range(len(currentParameter)):
                    tmp = tmp + visibleNodesNegative[w]*currentParameter[w][nextLayerNodeNegative]
                nextLayerNodesNegative[nextLayerNodeNegative] = bernoulli.rvs(1.0/(1 + math.exp(-1 * tmp)), size=1)[0]
            ###compute delta parameters
            #deltaParameter = [[0 for x in range(len(currentParameter[0]))] for y in range(len(currentParameter))]
            for i in range(len(currentParameter)):
                for j in range(len(currentParameter[0])):
                    currentParameter[i][j] = currentParameter[i][j] + a * (currentLayerNode[i]*nextLayerNodes[j]-visibleNodesNegative[i]*nextLayerNodesNegative[j])
        parameters[parameter] = currentParameter
        ###update currentLayerNodes for pre-training next layer parameters
        for instance in range(len(currentLayerNodes)):
            currentLayerNode = currentLayerNodes[instance]
            nextLayerNodes = [0 for x in range(nodes[parameter])]  # fresh list per instance
            for nextLayerNode in range(len(nextLayerNodes)):
                tmp = 0
                for w in range(len(currentParameter)):
                    tmp = tmp + currentLayerNode[w]*currentParameter[w][nextLayerNode]
                nextLayerNodes[nextLayerNode] = bernoulli.rvs(1.0/(1 + math.exp(-1 * tmp)), size=1)[0]
            currentLayerNodes[instance] = nextLayerNodes
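The element-wise loops above implement CD-1 (one-step contrastive divergence) for a stack of bias-free RBMs. A vectorized sketch of a single CD-1 weight update for one training vector, under the same no-bias assumption (sigmoid and cd1_step are illustrative names, not from the original):

import numpy as np
from scipy.stats import bernoulli

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_step(W, v, lr=0.1):
    h = bernoulli.rvs(sigmoid(v @ W))          # positive-phase hidden sample
    v_neg = bernoulli.rvs(sigmoid(h @ W.T))    # reconstructed visible units
    h_neg = bernoulli.rvs(sigmoid(v_neg @ W))  # negative-phase hidden sample
    return W + lr * (np.outer(v, h) - np.outer(v_neg, h_neg))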
def SAR(n, V, experiment):
    # arms = number of edges in the complete graph on V vertices
    K = (V*(V-1))//2  # integer division, so K can be used as a list length
    
    actual_means=oracle_means(experiment)
    oracleset=oracle_set(experiment)
    
    rewards=[0]*K # sum of the rewards of each arm that has been sampled
    rounds=[0]*K # count of total number rounds each arm has been sampled
    empirical_means=[0]*K # set of the empirical means of arms
    
    # Sample each arm floor(n/K) times
    numbersamples = floor(n/K)
    
    for arm in range(K):
        arm_samples = bernoulli.rvs(actual_means[arm],size=numbersamples)
        rewards[arm]=rewards[arm]+sum(arm_samples)
        rounds[arm]=rounds[arm]+numbersamples
        empirical_means[arm]=(rewards[arm]/float(rounds[arm]))
        
    acceptedset = calcMaxST(V,empirical_means) # Indices of ARMS of MaxST
    
    if set(acceptedset)==set(oracleset):
        return 1.0
    else:
        return 0.0
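A hedged driver sketch, estimating how often SAR recovers the oracle set (assumes oracle_means, oracle_set and calcMaxST are defined elsewhere, as in the snippet; exp_id stands for whatever experiment identifier they expect):

trials = 100
hits = sum(SAR(1000, 5, exp_id) for _ in range(trials))
print(hits / trials)  # empirical success probability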
Example 4
def plot(x):
    plt.clf()
    plt.plot(x, sigmoid(x), label='sigmoid')
    plt.legend(loc=4)
    plt.scatter(x, bernoulli.rvs(sigmoid(x)))
    plt.savefig('sigmoid.pdf')  # save the current figure directly
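plot relies on a sigmoid helper that the snippet does not show; a standard definition, for completeness:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))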
Example 5
def bar_ub_zuf(n, p, th, seed=None):

    #init
    G = nx.Graph()
    G.name = "bar_ub_zuf"
    G.add_nodes_from(range(n))
    posibleEdges = {}
    keyAux = 0
    giantComponent = False
    probabilities = bernoulli.rvs(p, size=n*(n-1)//2)
    for node1 in range(0, n):
        for node2 in range(node1 + 1, n):
            # Whether each edge exists can be decided offline, before the experiment.
            posibleEdges[keyAux] = (probabilities[keyAux], node1, node2)
            keyAux += 1
    for numEdges in range(0, len(posibleEdges)):
        posEdge = posibleEdges.pop(random.choice(list(posibleEdges.keys())))
        # The edge exists
        if posEdge[0] == 1:
            G.add_edge(posEdge[1], posEdge[2])
            # Check for a giant component
            if len(max(nx.connected_components(G), key=len)) > n*th:
               giantComponent = True
               break
    # Returns whether a giant component exists and the iteration at which it is reached
    numEdgesForGiantComponent = numEdges + 1
    return giantComponent, numEdgesForGiantComponent
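A sketch of a call, assuming networkx, random and scipy.stats.bernoulli are imported as the snippet expects: test whether 100 nodes at edge probability 0.05 reach a component covering more than half the graph.

giant, num_edges = bar_ub_zuf(100, 0.05, 0.5)
print(giant, num_edges)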
Example 6
def law_of_large_numbers():
    x = np.arange(1, 1001, 1)
    r = bernoulli.rvs(0.3, size=1000)
    y = []
    rsum = 0.0
    for i in range(1000):
        if r[i] == 1:
            rsum = rsum + 1
        y.append(rsum/(i+1) - 0.3)
    plt.plot(x, y, color='red')
    plt.show()

    x = np.arange(1, 1001, 1)
    r1 = binom.rvs(10, 0.6, size=1000)
    r2 = poisson.rvs(mu=6, size=1000)
    r3 = norm.rvs(loc=6, size=1000)

    y = []
    rsum = 0.0
    for i in range(1000):
        rsum = rsum + (r1[i] + r2[i] + r3[i])
        y.append(rsum/((i+1)*3) - 6)

    plt.plot(x, y, color='red')
    plt.show()
Example 7
def methods(vals, n, n_iters, hill_climbing = False, sim_annealing = False, l="s", method = 1):
    if l == "h":
        hill_climbing = True
    elif l == "a":
        sim_annealing = True
    t0 = time.time()    
    best = float("inf")
    best_s = init(method, n)
    if sim_annealing:
        local_s = best_s
        local = float("inf")
        t_ = t(n_iters)
    for i in range(n_iters):
        if hill_climbing:
            new_s = rand_neighbor(n, best_s, method)
        elif sim_annealing:
            new_s = rand_neighbor(n, local_s, method)            
        else:
            new_s = init(method, n)            
        new = resid(vals, new_s, n, method)
        if sim_annealing:
            if new<local:
                local, local_s = new, new_s
            elif bernoulli.rvs(math.exp(-(new - local) / t_)):
                local, local_s = new, new_s               
        if new < best:
            best, best_s = new, new_s
    tf = time.time()-t0
    return best, tf
Example 8
def fft_to_hkl(h, k, l, val, coeffs, fsc_curve, resolution, full_size, flag_frac):
    '''Reformat fft record as hkl record'''
    if h or k or l:
        res = full_size / (np.linalg.norm(np.asarray([h, k, l])))
    else:
        res = 0.0

    if res < resolution or not np.isfinite(res):
        return None, None

    mag = np.abs(val)
    angle = np.angle(val, deg = True)

    if angle < 0:
        angle += 360.0

    fsc = curve_function((1. / res), coeffs, fsc_curve)
    sig = fsc_to_sigf(mag, fsc)
    fom = fsc_to_fom(fsc)
    hla, hlb = fom_to_hl(fom, np.angle(val))
    rf = bernoulli.rvs(flag_frac)
    record = np.array([h, k, l, mag, sig, angle, fom, hla, hlb, 0.0, 0.0, rf], dtype = np.float32)
    
    if not np.all(np.isfinite(record)):
        print("Skipping record %i %i %i - " %(h, k, l)),
        print(record)
        return None, None

    return record, res
def simulator_spike_slab():
    #First, simulate Beta-Bernoulli conjugate prior
    z = []     #n_factor * n_gene array
    v = []     #n_factor * n_gene array

    #initialize v to all zeros
    for i in range(n_factor):
        v.append([0] * n_gene)

    #simulate pi and z
    for i in range(n_factor):
        pi = sampler_beta(beta[0], beta[1])
        z.append(bernoulli.rvs(pi, size = n_gene))

    #simulate v
    for i in range(n_factor):
        for j in range(n_gene):
            if (z[i][j] != 0):
                sigma = sampler_Gamma(normalGamma[2], normalGamma[3])
                v[i][j] = sampler_Normal(normalGamma[0], sigma/normalGamma[1])

    return v
Example 10
def noisy_dropout(p, h, hp=None):
    # Keep each unit with probability p, then replace dropped (near-zero)
    # activations with uniform noise instead of leaving them at zero.
    mask = bernoulli.rvs(p, size=h.shape)
    h = np.multiply(h, mask)
    h[np.abs(h) < 1e-32] = np.random.uniform(size=h[np.abs(h) < 1e-32].shape)
    if hp is not None:
        hp = np.multiply(hp, mask)
        hp[np.abs(hp) < 1e-32] = np.random.uniform(size=hp[np.abs(hp) < 1e-32].shape)
    return mask, h, hp
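Illustrative use on a random activation matrix (the shape and keep probability are arbitrary):

import numpy as np

h = np.random.randn(4, 5)
mask, h_noisy, _ = noisy_dropout(0.8, h)  # keep each unit with probability 0.8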
Example 11
def Slap(self, pile):
    if self.pile.len() == 0 or pile.len() < 2:
        return 0
    else:
        if bernoulli.rvs(self.propensityToSlap):
            return 10
        else:
            return 0
Example 12
def main():
    N = 5000

    c1 = bernoulli.rvs(p1, size=N)
    c2 = bernoulli.rvs(p2, size=N)
    c3 = bernoulli.rvs(p3, size=N)

    res = (~c1 & c2) | (c1 & c3)

    T = np.sum(res)
    F = np.size(res) - np.sum(res)

    logd("{}/{} \t {}".format(T, N, T / float(N)))

    df = pd.DataFrame(np.c_[c1, res, range(N)])
    g = df.groupby([0, 1])[2].apply(lambda x: len(x.unique()))

    with open("coins2_artificial.pb", "w") as fout:
        fout.write(
            """
% aprob debug flags
%:- set_value(dbg_read,2).
%:- set_value(dbg_query,2).
%:- set_value(dbg_write,2).
"""
        )
        for i, j in reversed(list(zip(g, g.keys()))):
            j_str = [2 if el == 1 else 1 for el in j]
            fout.write("observe({}, {}, {}).\n".format(j_str[0], j_str[1], i))
        fout.write(
            """
pb_dirichlet(1.0, toss, 2, 3).

generate(1, Val) :-
    toss(1, 1),
    toss(Val, 2).
generate(2, Val) :-
    toss(2, 1),
    toss(Val, 3).

pb_plate(
   [observe(Val1, Val2, Count)],
   Count,
   [generate(Val1, Val2)]).
"""
        )
def main():
    cvs_files = (
        'procesamientoImagenes_codigoBarras.csv',
        'procesamientoImagenes_logotipos.csv',
        'procesamientoImagenes_ocr.csv')

    for filename in cvs_files: 
        path = os.path.abspath('..')+'/data/'+filename
        fl = open(path, "w")
        fl.write('frontal, izquierda, derecha, trasera\n')
        for trial in zip(bernoulli.rvs(random.random(), size=30),
                      bernoulli.rvs(random.random(), size=30),
                      bernoulli.rvs(random.random(), size=30),
                      bernoulli.rvs(random.random(), size=30) ):
            fl.write(str(trial[0])+', '+str(trial[1])+', '+str(trial[2])+', '+str(trial[3])+' \n')
        fl.close()
    print "Done."
Example 14
def crossover(c, x, xx):
    parent = [x, xx]
    parent_choice = bernoulli.rvs(c, size=len(x))
    y = []

    for i, p in enumerate(parent_choice):
        y.append(parent[p][i])
    return y
Example 15
def er_graph(N, p):
    '''Generate an ER graph'''
    G = nx.Graph()
    G.add_nodes_from(range(N))
    for node1 in G.nodes():
        for node2 in G.nodes():
            if node1 < node2 and bernoulli.rvs(p=p):
                G.add_edge(node1, node2)
    return G
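A quick sanity check (a sketch, not part of the original): the expected edge count of an ER graph is p*N*(N-1)/2.

G = er_graph(50, 0.1)
print(G.number_of_nodes(), G.number_of_edges())  # ~122 edges on average for N=50, p=0.1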
Example 16
def bernoulli_success_prob(success):
    simlen = int(1e5)
    data_bern = bernoulli.rvs(size=simlen, p=success)
    #print(data_bern)
    #Calculating the number of favourable outcomes
    err_ind = np.nonzero(data_bern == 1)
    #calculating the probability
    err_n = np.size(err_ind) / simlen
    return err_n
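For example, a call with success=0.3 should return an estimate near 0.3, up to Monte Carlo noise:

print(bernoulli_success_prob(0.3))  # close to 0.3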
Example 17
def bsc_transfer(data, error_rate):
    '''
        transfer data through this noisy channel
    '''
    data_noise = bernoulli.rvs(p=error_rate, size=data.size)
    index_corrupted = np.where(data_noise == 1)
    data_output = data.copy()
    data_output[index_corrupted] = 1 - data[index_corrupted]
    return data_output
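A small worked example of the binary symmetric channel (input bits are illustrative):

import numpy as np

data = np.array([0, 1, 1, 0, 1])
print(bsc_transfer(data, 0.1))  # each bit flips independently with probability 0.1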
Example 18
def flip_image(image, steering_angle, flipping_prob=0.6):
    # if the image is flipped, the steering angle needs to be negated
    coin = bernoulli.rvs(flipping_prob)
    #print("flip coin =", coin)
    # note: as written, the flip happens on coin == 0, i.e. with probability 1 - flipping_prob
    if coin == 0:
        return np.fliplr(image), -1 * steering_angle
    else:
        return image, steering_angle
Example 19
def mirror(image, steering_angle, prob=0.5):
    #Source: https://github.com/upul/Behavioral-Cloning
    #It randomly flips the images

    mirror = bernoulli.rvs(prob)
    if mirror:
        return np.fliplr(image), -1 * steering_angle
    else:
        return image, steering_angle
Example 20
def getnewimage(self, image, steering_angle):
    head = bernoulli.rvs(self.shrate)  # draw, not a probability
    if head == 1:
        image, steering_angle = self.rnshear(image, steering_angle)
    image = self.crop(image)
    image, steering_angle = self.rnflip(image, steering_angle)
    image = self.rngamma(image)
    image = self.resize(image)
    return image, steering_angle
Example 21
def erdosGraph(N, p):
    G = nx.Graph()
    G.add_nodes_from(range(N))
    listG = list(G.nodes())
    for i, node1 in enumerate(listG):
        for node2 in listG[i + 1:]:
            if (bernoulli.rvs(p=p)):
                G.add_edge(node1, node2)
    return G
Example 22
def er_graph(N, p):
    from scipy.stats import bernoulli
    G = nx.Graph()
    G.add_nodes_from(range(N))
    for node1 in G.nodes():
        for node2 in G.nodes():
            if node1 < node2 and bernoulli.rvs(p=p):
                G.add_edge(node1, node2)
    return G
def re_generar(muestra, tags_all, tag, prob):
    sent, indices, tags = just_tag_word(muestra, tags_all, tag)
    noise_mask = bernoulli.rvs(prob, size=sent.shape)
    bool_list = list(map(bool, noise_mask))
    to_replace = sent[bool_list]
    indix = indices[bool_list]
    tagx = tags[bool_list]
    idx_to_orig = len(sent)
    return to_replace, indix, tagx, idx_to_orig
Example 24
def splitGraph(graph, random=True, p=1 / float(3)):
    all_edges = graph.getAllEdges()
    vertices = graph.vlist
    inc = bernoulli.rvs(p, size=len(all_edges))
    train = SparseGraph(vertices)
    test = SparseGraph(vertices)
    train.addEdges(all_edges[inc == 0])
    test.addEdges(all_edges[inc == 1])
    return train, test
Example 25
def Zn(n, draws):
    Zarray = np.zeros(draws)
    for i in range(draws):
        p = 0.5
        x0 = bernoulli.rvs(p, size=n)
        x1 = np.array(x0)
        np.place(x1, x0 == 0, -1)
        Zarray[i] = (1 / n) * np.sum(x1)
    return Zarray
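Each entry of Zarray is the mean of n centered ±1 flips, so its standard deviation should shrink like 1/sqrt(n); a quick check (values illustrative):

z = Zn(100, 2000)
print(z.mean(), z.std())  # mean near 0, std near 1/sqrt(100) = 0.1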
def generate_pkt(self, round_index):
    # No matter whether the current round is scheduled for retransmission,
    # we should check whether a new packet has arrived and save it into the buffer.
    # Each device is modeled as a packet source following a Bernoulli distribution.
    # The probability is defined as $\lambda/n$, where $\lambda$ is the mean of the
    # fresh random-access Poisson distribution and n is the total number of devices.
    if bernoulli.rvs(self.proba) == 1:
        pkt = Packet(round_index, self.index, 1, self.POWER_LEVELS[0], -1)
        self.packets.append(pkt)
    def stochastic_weights(self):
        '''For use in a decision field theory model. Probabilistically set weights for attributes to be either
        0 or 1, so that at each time step crossing alternatives are compared on a single attribute only.
        '''

        # Draw 0 or 1 at random from a Bernoulli distribution, with probability of 1 given by the weight parameter alpha
        weight_time = bernoulli.rvs(self._alpha)
        weight_ve = int(not weight_time)
        return np.array([weight_time, weight_ve])
Example 28
def guessing_sex(row):
    """Male = 1, female = 0
    """
    if row['greek_council'] == 'Fraternity':
        return 1
    elif row['greek_council'] == 'Sorority':
        return 0
    else:
        return bernoulli.rvs(p=0.5)
Example 29
def erdosGraph(N, p):
    G = nx.Graph()  # store an empty graph in G
    G.add_nodes_from(range(N))  # add nodes 0 to N-1
    listG = list(G.nodes())  # store all nodes in a list
    for i, node1 in enumerate(listG):  # enumerate() also returns the index
        for node2 in listG[i+1:]:  # start at index i+1 so each pair of nodes is considered once
            if (bernoulli.rvs(p=p)):  # here p is 0.18
                G.add_edge(node1, node2)  # edges are added only between distinct, non-repeating node pairs
    return G
Example 30
File: AEP.py Project: AjeyPaiK/AEP
def gen_bernoulli(X, n, p):
    bernoulli_sequences = []
    for i in range(0, X):
        sequence = bernoulli.rvs(size=n, p=p)
        sequence = tuple(sequence)
        bernoulli_sequences.append(sequence)
    # print("The sequences are:\n")
    # print(*bernoulli_sequences, sep="\n")
    return bernoulli_sequences
Example 31
def splitGraph(graph, random=True, p=1/float(3)):
    all_edges = graph.getAllEdges()
    vertices = graph.vlist
    inc = bernoulli.rvs(p, size=len(all_edges))
    train = SparseGraph(vertices)
    test = SparseGraph(vertices)
    train.addEdges(all_edges[inc == 0])
    test.addEdges(all_edges[inc == 1])
    return train, test
Example 32
def gen_random_adj_matrix(N):
    b = np.random.randint(0, 2001, size=(N, N))  # random_integers is removed in newer NumPy
    b_symm = (b + b.T)/2
    p = 0.6
    for i, j in itertools.combinations(range(N), 2):
        if bernoulli.rvs(p):  # zero out the edge with probability p
            b_symm[i][j] = 0
            b_symm[j][i] = 0
    return b_symm
Example 33
def compute_bounds(p_hat, decay, n, alpha, n_sim=1000):
    bernoulli_samples = bernoulli.rvs(p_hat, size=n * n_sim).reshape(n_sim, n)
    # TODO: Check if shapes match

    #empirical_bounds = (1 - decay) * (bernoulli_samples * (n - np.arange(1, n + 1))).sum(axis=1)
    empirical_bounds = (1 - decay) * np.matmul(bernoulli_samples, decay ** (n - np.arange(1, n + 1)).reshape(n, 1)).sum(axis=1)
    lb, ub = np.percentile(empirical_bounds, q=[alpha * 100, (1 - alpha) * 100])

    return lb, ub
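An illustrative call (all parameter values made up): with alpha=0.025 the returned percentiles are the 2.5th and 97.5th, i.e. a central 95% interval for the discounted Bernoulli sum.

lb, ub = compute_bounds(p_hat=0.3, decay=0.9, n=50, alpha=0.025)
print(lb, ub)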
Example 34
    def _sample(self, p, x, size):

        # infer the sample size from x when x is an array; fall back to a scalar draw
        try:
            size = x.shape[0]
        except AttributeError:
            size = None
        b = bernoulli.rvs(p, size=size)
        self.value = (x + b) % self.k
        return self.value
Example 35
def cook_codebook(M, n, p):
    bernoulli_sequences = []
    for i in range(0, M):
        sequence = bernoulli.rvs(size=n, p=p)
        sequence = tuple(sequence)
        bernoulli_sequences.append(sequence)
    # print("The sequences are:\n")
    # print(*bernoulli_sequences, sep="\n")
    return bernoulli_sequences
Example 36
def initAlpha(self):
    """ Initialisation of ARD on the weights """
    alpha = [s.zeros(self.K, ) for m in range(self.M)]
    for m in range(self.M):
        tmp = bernoulli.rvs(p=0.5, size=self.K)
        tmp[tmp == 1] = 1.
        tmp[tmp == 0] = 1E5
        alpha[m] = tmp
    return alpha
def bsc_channel(x, p):
    y = []
    for c in x:
        c = int(c)
        if bernoulli.rvs(p):
            c = 1 ^ c  # bit flip

        y.append(c)
    return y
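Example use, treating a bit string as the channel input (the error rate is illustrative):

print(bsc_channel('10110', 0.1))  # a list of ints; each bit flips with probability 0.1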
Example 38
def er_graph(N, p):
    """Generate an ER graph."""
    G = nx.Graph()
    G.add_nodes_from(range(N))
    for node1 in G.nodes():
        for node2 in G.nodes():
            if node1 < node2 and bernoulli.rvs(p=p):
                G.add_edge(node1, node2)
    return G
Example 39
def play(self):
    win_value = bernoulli.rvs(self.probability_vector[self.current_index])
    self.win_value[self.current_index] += win_value
    self.win_value_in_time.append(win_value)
    self.number_of_games[self.current_index] += 1
    self.mean_win_value[self.current_index] = self.win_value[
        self.current_index] / self.number_of_games[self.current_index]
    self.total_number_of_games += 1
    self.current_index_vector.append(self.current_index)
def _initialize(self):
    # initialize all z_kn, with probability p=0.5 of being active
    samples = bernoulli.rvs(p=0.5, size=self.N * self.K_plus).reshape(
        self.K_plus, self.N)
    self.M = np.sum(samples, axis=1)
    self.K_plus = np.count_nonzero(self.M)
    mask = self.M > 0
    self.active_mask = mask
    return samples
Example 41
def unknown_sex(sexstring):
    if sexstring == "Unknown":
        sex = bernoulli.rvs(im_p)
        if sex == 1:
            return "Intact Male"
        else:
            return "Intact Female"
    else:
        return sexstring
Example 42
def sim_t(X, y, z, p=1):
    """
    Generates labels from simulated annotators for the input dataset.

    Parameters
    ----------
    X: array of shape = [n_samples, n_features]
        Input examples
    y: array of shape = [n_samples]
        Input original labels
    z: array of shape = [n_annotators]
        Annotators' expertise
        +1 if annotator labels all examples correctly
        -1 if annotator flips all labels
        0 if annotator generates noisy labels
    p: positive integer
        Noise parameter (see paper)

    Returns
    -------
    Y: array of shape = [n_samples, n_annotators]
        Simulated labels
    """

    m, n = X.shape
    nt = len(z)  # no. of teachers
    # noise_level = 1.# higher values result in low disagreement
    # first train a linear model, obtain scores
    cparam = 2 ** array([-14., -12., -10., -8., -6., -4., -2., -1., 0., 1., 2., 4., 6., 8., 10., 12., 14.])
    model = exp.train(X, y, 1. / cparam)
    f = model.predict(X)
    f = f * 1. / max(abs(f))  # scale to [-1, 1]
    f = 10 * f  # scores closer to +/-10 will get a prob 1
    f = 2 * (1 - (1. / (1 + np.exp(p * -0.25 * abs(f)))))

    Y = empty((m, nt))  # noisy labels
    for i in arange(m):
        # with prob=f/2, flip m coins
        Y[i] = -y[i] * (2 * bernoulli.rvs(f[i] / 2., size=nt) - 1)
        # with prob=f, flip all coins
        if sign(sum(Y[i])) == sign(y[i]):
            Y[i] = -Y[i] * (2 * bernoulli.rvs(f[i] / 1.) - 1)

    return Y
Example 43
def gen_regressor_dataset(df_path,
                          num_labels,
                          image_shape,
                          num_samples=None, data_root_dir=None, flip_prob=0.5):
    """
    Generate a dataset of numpy arrays from a dataframe

    Input:
        df_path (str): path to a CSV file readable by pandas
        num_labels (int)
        image_shape (tuple)

    Output:
        X (numpy.ndarray): model has 1 input
        y (numpy.ndarray): (num_samples, num_labels)
    """
    df = pd.read_csv(df_path)
    
    if not num_samples:
        num_samples = min(len(df), 19000)
    
    X = np.zeros((num_samples, ) + image_shape) # only 1 input
    y = np.zeros((num_samples, num_labels))
    
    flip = bernoulli.rvs(flip_prob, size=num_samples) == 1
    
    # Iterate through the whole dataset
    for i in tqdm(range(num_samples)):  # i is used to iterate batch 
        # get image file name
        file_name = df.iloc[i].frame_name
        # read image
        img = cv.imread(data_root_dir + file_name, 0)
        
        bottom_half = img[100 : , :]
        
        # down sample & reshape image
        img = np.float32(cv.resize(bottom_half, (image_shape[1], image_shape[0]), interpolation=cv.INTER_AREA))
        if len(img.shape) == 2:
            img = img.reshape(image_shape)

        # check if this sample needs to be flipped
        if flip[i]:
            img = np.fliplr(img)

        # store image to X
        X[i, :, :, :] = img
        
        # create y
        angle_val_list = df.iloc[i].angle_val[1: -1].split(", ")
        if flip[i]:
            y[i, :] = np.array([-float(angle) for angle in angle_val_list])
        else:
            y[i, :] = np.array([float(angle) for angle in angle_val_list])

    return X, y
Example 44
def process_probCond(qinput, qoutput, lock, pileup_prefix, parser_parameters,
                     ratio, n, ancestral):
    pileup = pp.openPileup(pileup_prefix, 'r')
    qualityEncoding = parser_parameters[0]
    minQual = parser_parameters[1]
    minCount = parser_parameters[2]
    minCoverage = parser_parameters[3]
    maxCoverage = parser_parameters[4]

    #creation of the parser object
    if ancestral == "provided":
        parser = pp.Pileup_parser_provided(qualityEncoding, minQual, minCount,
                                           minCoverage, maxCoverage)
    elif ancestral == "unknown":
        parser = pp.Pileup_parser_folded(qualityEncoding, minQual, minCount,
                                         minCoverage, maxCoverage)
    else:
        parser = pp.Pileup_parser_ref(qualityEncoding, minQual, minCount,
                                      minCoverage, maxCoverage)
    f = pp.Format()

    for item in iter(qinput.get, 'STOP'):
        l = []
        lock.acquire()
        pileup.seek(item[0])
        for i in range(item[1]):
            l.append(pileup.readline())
        #...
        lock.release()
        p_list = []
        for l_item in l:
            parsed = parser.get_pileup_parser(l_item)
            if parsed['valid'] == 1:
                if bernoulli.rvs(1. / ratio) == 1:
                    info = f.format('info', parsed)
                    unfolded = int(info.split()[7])
                    SE = np.fromstring(f.format('qual', parsed),
                                       dtype=float,
                                       sep=' ')
                    #print SE
                    votemp = np.fromstring(f.format('freq', parsed),
                                           dtype=int,
                                           sep=' ')
                    SEtemp = 10**(-SE / 10)
                    p = prob_cond_true_freq(n, votemp, SEtemp, unfolded)
                    if np.sum(p) > 0:
                        p_list.append(p)
                #....
            #...
        #...
        if len(p_list) != 0:  #in case that all the lines parsed are not valid
            qoutput.put(p_list)

        #...
    #...
    pileup.close()
Example 45
def random_sampling(CTR1, CTR2):
    CTR1 = float(CTR1)
    CTR2 = float(CTR2)
    ACTUAL_CTR = [CTR1, CTR2]

    n = 1000  # number of trials
    regret = 0
    total_reward = 0
    regret_list = []  # list for collecting the regret values for each impression (trial)
    ctr = {0: [], 1: []}  # lists for collecting the calculated CTR
    index_list = []  # list for collecting the number of the randomly chosen Ad
    impressions = [0, 0]
    clicks = [0, 0]

    for i in range(n):

        random_index = np.random.randint(0, 2, 1)[0]  ## randomly choose a value in [0, 1]
        index_list.append(random_index)  ## add the value to the list

        impressions[random_index] += 1  ## add 1 impression for the chosen Ad
        ## simulate whether the person clicked on the ad using the actual CTR value
        did_click = bernoulli.rvs(ACTUAL_CTR[random_index])
        # did_click = False

        if did_click:
            clicks[random_index] += did_click  ## if the person clicked, add 1 click for the chosen Ad

        ## calculate the CTR values and add them to the lists
        if impressions[0] == 0:
            ctr_0 = 0
        else:
            ctr_0 = clicks[0] / impressions[0]

        if impressions[1] == 0:
            ctr_1 = 0
        else:
            ctr_1 = clicks[1] / impressions[1]

        ctr[0].append(ctr_0)
        ctr[1].append(ctr_1)

        ## calculate the regret and reward
        regret += max(ACTUAL_CTR) - ACTUAL_CTR[random_index]
        regret_list.append(regret)
        total_reward += did_click

    count_series = pd.Series(index_list).value_counts(normalize=True)
    ad_1 = round(count_series[0], 3)
    ad_2 = round(count_series[1], 3)
    regret_list = [round(x, 2) for x in regret_list]
    return total_reward, ad_1, ad_2, regret_list
Example 46
def gen_new_image(img, steering_angle, top_crop_percent=0.35, bottom_crop_percent=0.1,
                       resize_dim=(64, 64), do_shear_prob=0.9):
    head = bernoulli.rvs(do_shear_prob)
    if head == 1:
        img, steering_angle = randshear(img, steering_angle)
    img = crop(img, top_crop_percent, bottom_crop_percent)
    img, steering_angle = randflip(img, steering_angle)
    img = randgamma(img)
    img = resize(img, resize_dim)
    return img, steering_angle
Example 47
def random_flip(image, steering_angle, flipping_prob=0.5):
    """
        flip a coin to see if the image will be flipped. if so,
        steering_angle will be inverted
    """
    head = bernoulli.rvs(flipping_prob)
    if head:
        return np.fliplr(image), -1 * steering_angle
    else:
        return image, steering_angle
Example 48
def er_graph(N, p):
    """Generate an ER graph"""
    G = nx.Graph()
    G.add_nodes_from(range(N))
    for node1 in G.nodes():
        for node2 in G.nodes():
            if node1 < node2 and bernoulli.rvs(
                    p=p):  # node1 < node2 to avoid repeating node pairs
                G.add_edge(node1, node2)
    return G
Example 49
def Play(self):
    t = 1
    self.W.append(self.W_init)
    while t <= self.T and self.W[t-1] > 0:
        self.W.append(self.W[t-1] + 2*bernoulli.rvs(self.p) - 1)
        t += 1
    if t <= self.T:
        self.T_0 = t - 1
    else:
        self.T_0 = 0
Example 50
def generate_data_equal_size(size, bcr, lift):
    df = pd.DataFrame(['A'] * int(size / 2) + ['B'] * int(size / 2),
                      columns=['group'])
    df.loc[df['group'] == 'A',
           'clicked'] = bernoulli.rvs(bcr,
                                      size=len(df[df['group'] == 'A']),
                                      random_state=42)
    df.loc[df['group'] == 'B',
           'clicked'] = bernoulli.rvs(bcr + lift,
                                      size=len(df[df['group'] == 'B']),
                                      random_state=42)

    summary = df.pivot_table(index='group', values='clicked', aggfunc=np.sum)
    summary['count'] = df.pivot_table(index='group',
                                      values='clicked',
                                      aggfunc=lambda x: len(x))
    summary['ctr'] = summary['clicked'] / summary['count']
    print(summary)
    return df, summary
Example 51
def sample_p_bern(n, s, p=0.5):
    # take n samples of size s and graph the sample means
    vals = []
    for i in range(n):
        tot = 0
        for j in range(s):
            tot += bernoulli.rvs(p)
        tot = tot / s
        vals.append(tot)
    plot_hist(vals, 'bernoulli_p_hist')
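The same distribution of sample means can be drawn without the double loop; a vectorized sketch (plot_hist as in the snippet):

def sample_p_bern_vec(n, s, p=0.5):
    vals = bernoulli.rvs(p, size=(n, s)).mean(axis=1)
    plot_hist(list(vals), 'bernoulli_p_hist')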
Example 52
def crossover(c, x, xx):
    parent = [x, xx]
    parent_choice = bernoulli.rvs(c, size=len(x))
    y = []
    for p in parent_choice:
        for b in parent[p]:
            if not (b in y):
                y.append(b)
                break

    return y
Example 53
File: 28.py Project: XNYu/Statistic
def law_of_large_numbers():
    x = np.arange(1, 1001, 1)
    r = bernoulli.rvs(0.3, size=1000)
    y = []
    rsum = 0.0
    for i in range(1000):
        if r[i] == 1:
            rsum = rsum + 1
        y.append(rsum/(i+1))
    plt.plot(x, y, color='red')
    plt.savefig('law_of_large_numbers.png')
Example 54
def er_graph(N, p):
    """ER graph: loop over all pairs of nodes, adding an edge with probability p."""
    G = nx.Graph()  # empty graph
    G.add_nodes_from(range(N))

    for node1 in G.nodes():
        for node2 in G.nodes():
            if node1 < node2 and bernoulli.rvs(p=p):
                G.add_edge(node1, node2)
    return G
Example 55
    def energy_zorb_or_none(world):
        """
            p_find_energy = max_energy_day / world_size
            p_find_zorb = no_zorbs / world_size
        """

        energy_found = bernoulli.rvs(
            (world.energy / world.size),
            size=1
        )
        if energy_found[0] == 1:
            return Actions.energy

        zorb_found = bernoulli.rvs(
            (world.zorbs_no / world.size),
            size=1
        )
        if zorb_found[0] == 1:
            return Actions.zorb

        return Actions.none
Example 56
def law_of_large_numbers():
    x = np.arange(1, 1001, 1)
    # scipy.stats.bernoulli.rvs(p, loc=0, size=1) returns `size` Bernoulli-distributed random variates
    r = bernoulli.rvs(0.3, size=1000)
    y = []
    rsum = 0.0
    for i in range(1000):
        if r[i] == 1:
            rsum = rsum + 1
        y.append(rsum/(i+1))
    plt.plot(x, y, color='red')
    plt.show()
Example 57
def Calculate(self):
    t = 1
    while t <= self.T:
        if self.W[t - 1] > 0:
            tmp = 2 * bernoulli.rvs(self.p) - 1 + self.W[t-1]
            if tmp >= 0:
                self.W.append(tmp)
            else:
                self.W.append(0)
        else:
            self.W.append(0)
        t += 1
Example 58
	def sample(self, n_samples, py, plot=False):
		"""Sample Y according to py, and the corresponding features (x1, x2) from the Gaussian for each class."""
		Y = bernoulli.rvs(py, size=n_samples)
		X = np.zeros((n_samples, 2))
		for i in range(n_samples):
			if Y[i] == 1:
				X[i,:] = self.rv1.rvs()
			else:
				X[i,:] = self.rv0.rvs()
		if plot:
			self.plot(data=(X, Y))
		return X, Y
Example 59
def martingale(total_money, bet):
	while bet <= 500:
		win = bernoulli.rvs(0.5)
		if win:
			total_money += bet
			#print("I bet %i, and I won. I now have %i." % (bet, total_money))
			bet = 5
		else:
			total_money -= bet
			#print("I bet %i, and I lost. I now have %i." % (bet, total_money))
			bet = 2 * bet
	return total_money
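A quick check of the strategy (a sketch; the coin is fair and pays even money, so the expected final bankroll equals the starting one):

results = [martingale(100, 5) for _ in range(1000)]
print(sum(results) / len(results))  # close to 100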
Example 60
def create_infects(dI):
    B = lambda p: bernoulli.rvs(p, size=1)[0]
    Infects = np.vectorize(B)(dI)

    for pos in range(Infects.shape[1]):
        vector = Infects[:, pos]
        indexes = np.where(vector == 1)[0]
        if indexes.size > 0:
            i_0, i_n = min(indexes), max(indexes)
            if i_n - i_0 + 1 != len(indexes):
                Infects[:, pos][i_0:i_n] = 1

    return Infects
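create_infects expects a matrix of per-entry infection probabilities; a toy call (the shape and probability are illustrative):

import numpy as np

dI = np.full((6, 3), 0.4)  # 6 time steps, 3 individuals
print(create_infects(dI))  # 0/1 matrix; each column's run of 1s is made contiguous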