def test(self, test_data, label):
     n_pos = 0.0
     # count samples whose thresholded sigmoid output matches the given label
     for i in range(len(test_data)):
         if sigmoid(self.W, test_data[i]) >= 0.5 and label[i] == 1:
             n_pos += 1
         elif sigmoid(self.W, test_data[i]) < 0.5 and label[i] == 0:
             n_pos += 1

     print(n_pos, len(test_data), n_pos / len(test_data))
     print(self.W)
 def NLL(self, w):
     self.likelihood = 0.0
     for index, sample in enumerate(self.data):
         mu = sigmoid(w, sample)
         label = self.label[index]
         self.likelihood -= np.log(mu ** label * (1 - mu) ** (1 - label)) 
     return self.likelihood +  self.beta / 2 * np.dot(w, w)
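The methods above assume a sigmoid(w, x) helper that returns the logistic function of the inner product of the weight vector and a sample; a minimal sketch of that assumed helper:

import numpy as np

def sigmoid(w, x):
    # assumed helper: logistic function of the inner product w . x
    return 1.0 / (1.0 + np.exp(-np.dot(w, x)))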
Example #3
 def set_activation(self, i, vector):
     self.net[i] = inner_product(self.weight[i], vector)
     self.output[i] = sigmoid(self.net[i], 1.0)
     if self.output[i] > 0.5:
         self.setout[i] = 1.0
     else:
         self.setout[i] = 0.0
 def gradient(self, w, lst_data, lst_label):
     g = np.zeros(len(w))
     for index, data in enumerate(lst_data):
         mu = sigmoid(w, data)
         label = lst_label[index]
         g += data * (mu - label) 
     g += np.array(w) * self.beta
     return g
Example #5
    def think(self, inputs=[]):
        if len(inputs) != self.n_inputs:
            raise ValueError('Incorrect input')

        for i in range(self.n_inputs):
            self.a_inputs[i] = inputs[i]

        for i in range(self.n_hidden_layers):
            total = 0.0
            for j in range(len(self.a_inputs)):
                total += self.a_inputs[j] * self.weight_input[j][i]
            self.a_hidden_layers[i] = sigmoid(total)

        for i in range(self.n_outputs):
            total = 0.0
            for j in range(len(self.a_hidden_layers)):
                total += self.a_hidden_layers[j] * self.weight_output[j][i]
            self.a_outputs[i] = sigmoid(total)
        return self.a_outputs
Example #6
def get_lambdas(windows, sampling):
    windows = int(windows)
    step = int(windows / 2)
    lambdas = []
    lmbda_1 = []
    lmbda_2 = []
    k_dic = {
        'sigmoidal': -1.1,
        'linear': 1000,
        'exponential': -1.1,
        'reverse_exponential': 1.1
    }
    k = k_dic[sampling]

    if sampling == 'sigmoidal':
        for i in range(0, step + 1):
            lmbda1 = '{:.3f}'.format(
                0.5 * (f.sigmoid(float(i) / float(step), k) + 1))
            lmbda2 = '{:.3f}'.format(
                0.5 * (-f.sigmoid(float(i) / float(step), k) + 1))
            lmbda_1.append(lmbda1)
            lmbda_2.append(lmbda2)

        lmbda_2 = lmbda_2[1:]

        for i in reversed(lmbda_2):
            lambdas.append(i)

        for i in lmbda_1:
            lambdas.append(i)

    else:
        for i in range(0, windows + 1):
            lmbda = '{:.3f}'.format(f.sigmoid(float(i) / float(windows), k))
            lambdas.append(lmbda)

    lambdas = lambdas[::-1]
    return lambdas
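A hedged usage note for get_lambdas; the f.sigmoid(x, k) helper and its k parameter come from the surrounding module and are assumed here:

# The non-sigmoidal branch evaluates f.sigmoid(i / windows, k) for i = 0..windows,
# formats each value with '{:.3f}', and returns the resulting strings reversed.
schedule = get_lambdas(10, 'linear')
print(schedule)  # eleven formatted strings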
Example #7
    def predict(self, x):
        """calculate output given input and current parameters: W1, b1, W2, b2 """
        W1, W2, b1, b2 = self.params['W1'], self.params['W2'], self.params[
            'b1'], self.params['b2']

        # input --> hidden layer : sigmoid
        z2 = np.dot(x, W1) + b1
        a2 = sigmoid(z2)

        # hidden layer --> output : softmax
        z3 = np.dot(a2, W2) + b2
        y = softmax(z3)

        return y
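predict assumes elementwise sigmoid and softmax helpers; a minimal NumPy sketch of those assumed definitions:

import numpy as np

def sigmoid(z):
    # elementwise logistic function
    return 1.0 / (1.0 + np.exp(-z))

def softmax(z):
    # numerically stable softmax over the last axis
    z = z - np.max(z, axis=-1, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=-1, keepdims=True)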
Example #8
	def feed_forward(self, input_array):
		value_layers = []
		value_layers.append(input_array)
		
		for i in range(self.num_layer - 1):
			tab = []
			for j in range(len(self.layers[i].neurons)):
				nb = 0				
				for k in range(len(self.layers[i].neurons[j].weights)):
					nb += self.layers[i].neurons[j].weights[k] * value_layers[i][k]		
				nb += self.layers[i].neurons[j].bias
				tab.append(sigmoid(nb))
			value_layers.append(tab)
		return value_layers[-1]
def forward_prop(X, parameters, activation_func, Keep_prob):
    cache = {}
    activations = {}
    activations["A" + str(0)] = X
    L = len(parameters) // 2

    for l in range(1, L):
        cache["Z" + str(l)] = np.dot(
            parameters["W" + str(l)],
            activations["A" + str(l - 1)]) + parameters["b" + str(l)]
        if (activation_func == "sigmoid"):
            activations["A" + str(l)] = sigmoid(cache["Z" + str(l)])
        elif (activation_func == "relu"):
            activations["A" + str(l)] = relu(cache["Z" + str(l)])
        elif (activation_func == "tanh"):
            activations["A" + str(l)] = np.tanh(cache["Z" + str(l)])
        else:
            print("Error. Invalid Activation Function.")

        # Dropout Regularization
        cache["d" + str(l)] = np.random.rand(
            activations["A" + str(l)].shape[0],
            activations["A" + str(l)].shape[1])
        cache["d" + str(l)] = cache["d" + str(l)] < Keep_prob
        activations["A" + str(l)] = np.multiply(activations["A" + str(l)],
                                                cache["d" + str(l)])
        activations["A" + str(l)] /= Keep_prob

    cache["Z" + str(L)] = np.dot(
        parameters["W" + str(L)],
        activations["A" + str(L - 1)]) + parameters["b" + str(L)]
    activations["A" + str(L)] = sigmoid(cache["Z" + str(L)])

    ## The Dropout could have been initialized by running another for-loop over the activations,
    ## but doing it in-line here avoids the need for another for-loop.

    return cache, activations
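A hedged usage sketch for forward_prop with made-up shapes; the W/b dictionary keys and the (features, examples) column orientation follow the code above, the sigmoid helper is assumed, and the concrete sizes are illustrative:

import numpy as np

np.random.seed(0)
X = np.random.randn(4, 5)                                  # 4 features, 5 examples (columns)
parameters = {
    "W1": np.random.randn(3, 4) * 0.01, "b1": np.zeros((3, 1)),
    "W2": np.random.randn(1, 3) * 0.01, "b2": np.zeros((1, 1)),
}
cache, activations = forward_prop(X, parameters, "sigmoid", Keep_prob=0.8)
print(activations["A2"].shape)                             # (1, 5): one probability per example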
Example #10
    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}
        
        batch_num = x.shape[0]
        
        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)
        
        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)
        
        da1 = np.dot(dy, W2.T)
        dz1 = da1 * z1 * (1 - z1)  # backprop through sigmoid: sigmoid'(a1) = z1 * (1 - z1)
        grads['W1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
    def predict(self, x):
        """Predict class probabilities for the given input.

        Args:
            x (numpy.ndarray): image data used as input to the network

        Returns:
            numpy.ndarray: predicted probabilities from the softmax function
        """
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        #Calculate NN (forward)
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y
Example #12
    def gradient(self, x, t):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        dLda2 = (y - t)
        grads['W2'] = np.dot(z1.T, dLda2)
        grads['b2'] = dLda2[0]

        dLdz1 = np.dot(dLda2, W2.T)
        dLda1 = z1 * (1 - z1) * dLdz1

        grads['W1'] = np.dot(x.T, dLda1)
        grads['b1'] = dLda1[0]

        return grads
    def gradient(self, x, t):
        w1, w2 = self.dict['w1'], self.dict['w2']
        b1, b2 = self.dict['b1'], self.dict['b2']
        grads = {}

        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)

        num = x.shape[0]
        dy = (y - t) / num
        grads['w2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        da1 = np.dot(dy, w2.T)
        dz1 = sigmoid_grad(a1) * da1
        grads['w1'] = np.dot(x.T, dz1)
        grads['b1'] = np.sum(dz1, axis=0)

        return grads
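The backward pass above calls sigmoid_grad; a minimal sketch of that assumed helper (the derivative of the logistic function):

import numpy as np

def sigmoid_grad(x):
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)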
Example #14
 def squash_value(self):
     self.value = functions.sigmoid(self.value)
Example #15
def SpikeGeneration(neuron,control): # neuron contains input spike trains and a spiking mechanism

	Nsteps = int(neuron.total_time/neuron.dt) #total number of timesteps

	MP = np.zeros((Nsteps,),dtype='float') #membrane potential that the model uses.
                                  
	inp_tmp = copy.copy(neuron.input) #if no copy input is emptied (we use pop)

	output = [] #where output spike trains are stored.

	for g in range(neuron.compartments): #compartments = groups with their own NL

		MP_part = np.zeros((Nsteps,),dtype='float') #memb.pot. before NL within comp. 

		nsyn = int(neuron.N/neuron.compartments) #number of synapse in a group.

		for cnt in range(nsyn): #loop over the synapses in the compartment/group

			in_syn = inp_tmp.pop() #inp_tmp is a list of lists of input spike trains.

			for t in in_syn: # input spikes (in ms)

				t = int(t/neuron.dt) # conversion ms -> time-step number.

				len_ker = neuron.synapses.len_ker_st #[time-steps] 
				bndup = min(t+len_ker,Nsteps) 
				bndupk = min(len_ker,Nsteps-t) #because it could go beyond the maximal size.
				size = neuron.PSP_size #~25 in our case.
				kerns = neuron.synapses.ker # ~1 of size.
				indik = int(g*nsyn+cnt) # kerns covers the entire neuron.

				MP_part[t:bndup] = MP_part[t:bndup] + size*kerns[indik,:bndupk]

# Why not fftconvolve? Because it would mean creating a huge array of zeros and
# ones, this is not a slow part of the code anyway, and the for loop is "sparse".

		if control=='on': #to be sure that NL is correct and correctly applied.

			h = np.histogram(MP_part,bins=1000,range=[-80.,80.])

			MP_afterNL = fun.sigmoid(neuron.non_linearity[g],MP_part)
			hpost = np.histogram(MP_afterNL,bins=1000,range=[-80.,80.])

			plt.plot(h[1][:-1],h[0])
			plt.plot(hpost[1][:-1],hpost[0])
			plt.show()

			x = np.arange(-80.,80.,0.1)

			plt.plot(x,fun.sigmoid(neuron.non_linearity[g],x))
			plt.plot(x,x)
			plt.show()


		MP = MP + fun.sigmoid(neuron.non_linearity[g],MP_part) 

	for t in range(Nsteps): #spike generation itself.

		lamb = neuron.lambda0*np.exp((MP[t]-neuron.threshold)/neuron.delta_v)

		p = lamb*neuron.dt #first-order of (1-exp(-lamb*dt))

		if p>random.random():

			output.append(t*neuron.dt) #convert back to [ms]
			bndup = min(Nsteps,t+neuron.ASP_total_st)
			bndupk = min(neuron.ASP_total_st,Nsteps-t)
			size = neuron.ASP_size # ~30
			expfun = fun.exp_fun(neuron.ASP_time,neuron.dt,neuron.ASP_total) # ~1

			MP[t:bndup] = MP[t:bndup] - size*expfun[:bndupk] # can't convolve here.

	return output, MP
Example #16
 def forward(self, x):
     self.y = sigmoid(x)
     return self.y
Example #17
 def apply(self, x):
     return sigmoid(self.theta.T @ x)
Example #18
 def activation(self):
     """
     Returns the sigmoid of the inproduct of the weights and the inputs.
     """
     self.inproduct = np.dot(self.inputs, self.weights) + self.bias
     return sigmoid(self.inproduct)
Example #19
        if objectId == objectNum:
            if frameNumber >= startFrame and frameNumber <= endFrame:

                x, y, w, h = float(box.attrib['xc']), float(box.attrib['yc']), float(box.attrib['w']), float(box.attrib['h'])
                plotArr = finarr(x, y, h, w, pictureName, rArr)
                shift = plotArr[0][0]
                D = plotArr[0][1]

                ind = D.index(max(D))
                shif = shift[ind]
                sig = 0.1
                D = norm(D)
                if frameCnt % 10 == 0:
                    print("completed")
                    plt.plot(shift, sigmoid(D, sig), '--')

                #similarity.append(D)

                for g in range(1, 3):
                    shift1 = plotArr[g][0]
                    D1 = plotArr[g][1]
                    ind1 = D1.index(max(D1))
                    if g == 1:
                        shif1 = shift1[ind1]
                    else:
                        shif2 = shift1[ind1]
            xn = xp = x
            if frameNumber >= 14:
                if -10 <= shif <= 10:
                    xp = xn = int(x + shif)
Example #20
            edgecolors='black',
            label='Not admitted')

x1 = np.array([X[:, 1].min(), X[:, 1].max()])
x2 = (-1 / theta[2]) * (theta[1] * x1 + theta[0])
plt.plot(x1, x2)

plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
plt.xlim([30, 100])
plt.ylim([30, 100])
plt.legend(scatterpoints=1)
plt.show()

input('Program paused. Press enter to continue.\n')
plt.close()

# ########## Part4: Predict and Accuracies ##########
score_x = np.array([1, 45, 85]).reshape((3, 1))
prob = sigmoid(np.dot(theta.reshape(-1, 1).T, score_x))[0, 0]
print('For a student with scores 45 and 85, we predict an admission '
      'probability of {0:.3f}'.format(prob))
print('Expected value: 0.775 +/- 0.002\n')

p = predict(theta.reshape(-1, 1), X)

print('Train Accuracy: {0:.1f}'.format(np.mean(p == y) * 100))
print('Expected accuracy (approx): 89.0\n')

plt.close()
Example #21
 def fit_design_matrix_logistic_regression(self, descent_method = 'SGD-skl', eta = 0.001, Niteration = 200, m = 5, verbose = False):
     '''solve the model using logistic regression. 
     Method 'SGD-skl' for SGD scikit-learn,
     method 'SGD' for SGD with diminishing step length with minibatches,
     method 'GD' for plain gradient descent'''
     
     n, p = np.shape(self.X)
     if descent_method == 'SGD-skl':
         sgdreg = SGDRegressor(max_iter = 50, penalty=None, eta0=eta, fit_intercept = True)
         sgdreg.fit(self.X, self.inst.y_1d.ravel())
         self.betas = sgdreg.coef_
         self.y_tilde = sigmoid(self.X @ sgdreg.coef_ + sgdreg.intercept_)
         if verbose:
             # Cost function
             m = self.X.shape[0]
             cost = - (1 / m) * np.sum(self.inst.y_1d.ravel() * self.y_tilde + np.log(sigmoid(-self.y_tilde)))
             print('cost is', cost)
             
         return self.y_tilde, sgdreg.coef_
     
     elif descent_method == 'GD':
         #implement own gradient descent algorithm
         beta = np.ones((p, 1))
         X = self.X
         y = self.inst.y_1d[:, np.newaxis]
         for iter in range(Niteration):
             #Calculate probabilities
             y_tilde_iter = X @ beta
             prob = sigmoid(y_tilde_iter)
             compl_prob = sigmoid(-y_tilde_iter)
             
             #Calculate gradients
             gradients =  - X.T @ (y - prob)
             
             #Update parameters
             beta -= eta*gradients * 2./len(y_tilde_iter)
             
             if verbose:
                 # Cost function
                 m = X.shape[0]
                 cost = - (1 / m) * np.sum(y * y_tilde_iter + np.log(compl_prob))
                 print('cost is', cost)
         self.betas = beta
         self.y_tilde = sigmoid(self.X @ beta)
         return self.y_tilde, self.betas
     
     elif descent_method == 'SGD':
         #implement own stochastic gradient descent algorithm
         self.inst.sort_in_k_batches(m, random=True, minibatches = True)
         
         #initialize step length. The step will start from the input value of
         #eta and will diminish at the rate of t0/(t + t1) where t = epoch*m + i
         t0 = 1.0
         t1 = t0/eta
         X = self.X
         y = self.inst.y_1d[:, np.newaxis]
         epochs = int(Niteration / m)
         beta = np.ones((p, 1))
         for epoch in range(epochs):
             for i in range(m):
                 
                 # Pick random minibatch
                 minibatch_k = np.random.randint(m)
                 minibatch_data_idxs = self.inst.m_idxs[minibatch_k]
                 X_k = X[minibatch_data_idxs,:]
                 y_k = y[minibatch_data_idxs]
                 
                 # Calculate probabilities
                 y_tilde_iter = X_k @ beta
                 prob = sigmoid(y_tilde_iter)
                 compl_prob = sigmoid(-y_tilde_iter)
                 
                 # Evaluate gradients
                 gradients =  - X_k.T @ (y_k - prob)
                 
                 # Update steplength
                 t = epoch*m+i
                 eta = t0/(t+t1)
                 
                 # Adjust parameters
                 beta -= eta*gradients * 2./len(y_tilde_iter)
                 
                 if verbose:
                     # Cost function
                     m = X.shape[0]
                     cost = - (1 / m) * np.sum(y * y_tilde_iter + np.log(compl_prob))
                     print('cost is', cost)
         self.betas = beta
         self.y_tilde = sigmoid(self.X @ beta)
         return self.y_tilde, self.betas
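For reference, the cost printed in the GD and SGD branches is the standard logistic negative log-likelihood written with the identity log(sigmoid(-z)) = -log(1 + exp(z)), i.e. cost = -(1/m) * sum(y * (X @ beta) + log(sigmoid(-(X @ beta)))). A minimal standalone sketch of the same gradient step, assuming NumPy arrays X (n x p) and y (n x 1) and an elementwise sigmoid helper; the factor 2/len(...) used above is simply absorbed into the learning rate here:

def gd_step(beta, X, y, eta):
    # one plain gradient-descent step on the logistic cross-entropy:
    # gradient = X.T @ (sigmoid(X @ beta) - y) / n
    grad = X.T @ (sigmoid(X @ beta) - y) / len(y)
    return beta - eta * grad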
Example #22
 def calculate_certainty(short_mavg, long_mavg):
     slope_difference = (short_mavg.iloc[-1] / short_mavg.iloc[-2]) - (
         long_mavg.iloc[-1] / long_mavg.iloc[-2])
     certainty = abs((functions.sigmoid(slope_difference) - 0.5) * 2)
     return certainty
Example #23
          max(sample.area_ratios), '\n')
    print('accuracy is: ', sample.accuracy)
    print('roc-auc score is: ', sample.rocaucs)
    print('Area ratio is: ', sample.area_ratios, '\n')

else:
    # Don't run k-fold CV
    #collect information about training set
    y_tilde_train, betas = model.fit_design_matrix_logistic_regression(
        descent_method=desc_method, eta=input_eta, Niteration=Niterations, m=m)
    _, target_train = CDds.rescale_back(x=CDds.x_1d, y=CDds.y_1d, split=True)
    target_train = [int(elem) for elem in target_train]

    #collect information about test set
    X_test = model.create_design_matrix(x=CDds.test_x_1d)
    y_tilde = sigmoid(model.test_design_matrix(betas, X=X_test))
    _, target = CDds.rescale_back(x=CDds.test_x_1d,
                                  y=CDds.test_y_1d,
                                  split=True)
    _, y_tilde_scaled = CDds.rescale_back(x=CDds.test_x_1d,
                                          y=y_tilde,
                                          split=True)
    target = [int(elem) for elem in target]

    #Make onehot version of results
    y_tilde_train_onehot = np.column_stack((1 - y_tilde_train, y_tilde_train))
    y_tilde_onehot = np.column_stack((1 - y_tilde, y_tilde))

    # Print metrics
    print('Number of epochs: ', int(Niterations / m))
    print(
Example #24
import numpy as np
import sys
sys.path.append("..\tools")
from functions import sigmoid

inputs = np.array([0.7, -0.3])
weights = np.array([0.1, 0.8])
bias = -0.1

# TODO: Calculate the output
weights_sum = np.dot(inputs, weights)
print("weights_sum: {} ".format(weights_sum))
linear_sum = weights_sum + bias
print("linear_sum: {}".format(linear_sum))

#calculate sigmoid of the linear sum
output = sigmoid(linear_sum)

print('Output:')
print(output)
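For reference, the arithmetic above works out to:

# weights_sum = 0.7 * 0.1 + (-0.3) * 0.8 = -0.17
# linear_sum  = -0.17 + (-0.1)           = -0.27
# output      = sigmoid(-0.27) = 1 / (1 + e**0.27) ≈ 0.433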
x = input_data

for i in range(hidden_layer_size):
    if i != 0:
        x = activations[i-1]

    # Weight initialization
    # w = np.random.randn(node_num, node_num) * 1
    # w = np.random.randn(node_num, node_num) * 0.01
    w = np.random.randn(node_num, node_num) * np.sqrt(1.0 / node_num)
    # w = np.random.randn(node_num, node_num) * np.sqrt(2.0 / node_num)


    z = np.dot(x, w)

    # Activation values
    a = sigmoid(z)
    # a = relu(z)
    # a = tanh(z)

    activations[i] = a

# Visualization
for i, a in activations.items():
    plt.subplot(1, len(activations), i+1)
    plt.title(str(i+1) + "-layer")
    if i != 0: plt.yticks([], [])
    # plt.xlim(0.1, 1)
    plt.ylim(0, 7000)
    plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
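The snippet above uses input_data, hidden_layer_size, node_num, activations, and sigmoid without defining them; a minimal setup that would make it runnable (the values are illustrative assumptions, not from the original source):

import numpy as np
import matplotlib.pyplot as plt

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))        # elementwise logistic function

input_data = np.random.randn(1000, 100)    # 1000 samples with 100 features each
node_num = 100                             # nodes per hidden layer (must match the input width)
hidden_layer_size = 5                      # number of hidden layers to inspect
activations = {}                           # histogram source, filled by the loop above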
Example #26
 def feedforward(self):
   self.layer1 = functions.sigmoid(np.dot(self.input, self.weights1))
   self.output = functions.sigmoid(np.dot(self.layer1, self.weights2))
Example #27
 def forward(self, x):
     out = sigmoid(x)
     self.out = out
     return out
Example #28
print('theta coefficients are:', theta)
# *********** algorithm learned ************

theta = np.reshape(theta, (-1, 1))
bias = np.ones((np.shape(X_test)[0], 1))
X_test2 = X_test.values

for i in range(np.shape(X_test2)[0]):
    for j in range(np.shape(X_test2)[1]):
        if np.isnan(X_test2[i][j]):
            X_test2[i][j] = 10

y_pred = clf.predict(X_test2)
X_test = np.concatenate((bias, X_test), axis=1)

prediction = sigmoid(np.matmul(X_test, theta))
prediction = np.ravel(prediction)
for it in range(len(prediction)):
    if prediction[it] >= 0.5:
        prediction[it] = 1
    else:
        prediction[it] = 0
# print(prediction)

passengerId = np.zeros(np.shape(X_test)[0])
for it in range(np.shape(X_test)[0]):
    passengerId[it] = 892 + it

res = {'PassengerId': passengerId, 'Survived': prediction}
res2 = {'PassengerId': passengerId, 'Survived': y_pred}
df = pd.DataFrame(res)
Example #29
# min max scale from 0-255 to 0-1 scale
x_train = (x_train - np.min(x_train)) / (np.max(x_train) - np.min(x_train))
x_test = (x_test - np.min(x_test)) / (np.max(x_test) - np.min(x_test))
x_dims = (1, 28, 28)
num_classes = 10
class_names = np.unique(y_train)

# Conv layers with x dims 2 filters with kernel 3x3 stride of 1 and no padding
conv1 = layers.Conv(x_dims,
                    n_filter=2,
                    h_filter=3,
                    w_filter=3,
                    stride=1,
                    padding=0)
# activation for layer 1 'sigmoid'
sig = mf.sigmoid()
# MaxPool layer 2x2 stride of 2
pool1 = layers.Maxpool(conv1.out_dim, size=2, stride=2)
# Conv layer with 2 filters kernel size of 3x3 stride of 1 and no padding
conv2 = layers.Conv(pool1.out_dim,
                    n_filter=2,
                    h_filter=3,
                    w_filter=3,
                    stride=1,
                    padding=0)
# activation for layer 2 rectified linear
relu = mf.ReLU()
# MaxPool layer 2x2 stride 1
pool2 = layers.Maxpool(conv2.out_dim, size=2, stride=1)
# Flatten the matrix
flat = layers.Flatten()
Example #30
    # a: Activation
    # z: Value
    # cost: Cost
    # _: Division

    # Feed Forward

    # Phase 1
    # Multiply the input values by the weights and add the bias.
    # This gives us as many values as there are hidden nodes
    # for each flag.
    zh = np.dot(feature_set, wh) + bh  # 400x75 . 75x16 = 400x16

    # Find out how strongly this value triggers the activation
    # function.
    ah = f.sigmoid(zh)  # 400x16

    # Phase 2
    # Multiply the values obtained from the hidden nodes by the
    # weights and add the bias. This gives us as many values as
    # there are output nodes for each flag.
    zo = np.dot(ah, wo) + bo  # 400x16 . 16x100 = 400x100

    # Find out how strongly this value triggers the activation
    # function.
    ao = f.softmax(zo)  # 400x100

    # Back Propagation

    # Phase 1
    # The difference between the result we obtained and the true result
Example #31
 def predict(self, x_test):
   y_pred = sigmoid(self.ps_coeff*np.matmul(x_test, self.w.T))
   y_pred[np.where(y_pred >= 1/2)] = 1
   y_pred[np.where(y_pred < 1/2)] = 0
   return y_pred
Example #32
 def forward(self, data):
     res = sigmoid(self.W.dot(data) + self.b)
     cache = data, res
     return res, cache
Example #33
def SpikeGeneration(inp,neuron,control,string): # neuron contains input spike trains and a spiking mechanism

	if string=='training':

		T = neuron.total_time

	elif string=='test':

		T = neuron.total_time_test

	Nsteps = int(T/neuron.dt) #total number of timesteps

	MP = np.zeros((Nsteps,),dtype='float') #membrane potential that the model uses.
                                  
	inp_tmp = copy.copy(inp) #if no copy input is emptied (we use pop)

	output = [] #where output spike trains are stored.

	for g in range(neuron.Ng): #Ng = groups with their own NL

		MP_part = np.zeros((Nsteps,),dtype='float') #memb.pot. before NL within comp. 

		nsyn = int(neuron.N/neuron.Ng) #number of synapse in a group.

		for cnt in range(nsyn): #loop over the synapses in the compartment/group

			in_syn = inp_tmp.pop() #inp_tmp is a list of lists of input spike trains.

			for t in in_syn: # input spikes (in ms)

				t = int(t/neuron.dt) # conversion ms -> time-step number.

				len_ker = neuron.synapses.len_ker_st #[time-steps] 
				bndup = min(t+len_ker,Nsteps) 
				bndupk = min(len_ker,Nsteps-t) #because it could go beyond the maximal size.
				size = neuron.PSP_size #~25 in our case.
				kerns = neuron.synapses.ker # ~1 of size.
				indik = int(g*nsyn+cnt) # kerns is for the entire neuron.

				MP_part[t:bndup] = MP_part[t:bndup] + size*kerns[indik,:bndupk]

# Why not fftconvolve? Because it would mean creating a huge array of zeros and
# ones, this is not a slow part of the code anyway, and the for loop is "sparse".

		if control=='on':
		
			plt.plot(MP_part)
			print(g)
			plt.plot(fun.sigmoid(neuron.non_linearity[g],MP_part))
			plt.show()
		
			h = np.histogram(MP_part,bins=1000,range=[-2.,2.])
			h_after = np.histogram(fun.sigmoid(neuron.non_linearity[g],MP_part),bins=1000,range=[-2.,2.])

			plt.plot(h[1][:-1],h[0])
			plt.plot(h_after[1][:-1],h_after[0])
			plt.show()

		MP = MP + fun.sigmoid(neuron.non_linearity[g],MP_part) 

	for t in range(Nsteps): #spike generation itself.

		lamb = neuron.lambda0*np.exp((MP[t]-neuron.threshold)/neuron.delta_v)

		p = lamb*neuron.dt #first-order of (1-exp(-lamb*dt))

		if p>random.random():

			output.append(t*neuron.dt) #convert back to [ms]
			bndup = min(Nsteps,t+neuron.ASP_total_st)
			bndupk = min(neuron.ASP_total_st,Nsteps-t)
			size = neuron.ASP_size # ~30
			expfun = fun.exp_fun(neuron.ASP_time,neuron.dt,neuron.ASP_total) # ~1

			MP[t:bndup] = MP[t:bndup] - size*expfun[:bndupk] # can't convolve here.

	return output, MP, MP_part
Example #34
def logistic_regression(x, y):
    n = len(y)
    return lambda w: (sum(y * np.log(sigmoid((w * x).sum(axis=1))) +
                          (1 - y) * np.log(1 - sigmoid((w * x).sum(axis=1)))) /
                      (-n), logistic_regression_derivative(x, y)(w))
Example #35
import numpy as np
import functions as f
import pickle
import glob

pickle_in = open("rick.pickle", "rb")
wh, bh, wo, bo = pickle.load(pickle_in)
flagNames = f.getFlagNames()

for file in glob.glob("./flags/*/*.jpg"):
    fileName = file.split("\\")[2][0:-4]
    np.set_printoptions(suppress=True)
    zh = np.dot(np.vstack([f.getPixels(file)]), wh) + bh
    ah = f.sigmoid(zh)
    zo = np.dot(ah, wo) + bo
    ao = f.softmax(zo)
    flagIndex = np.where(ao == np.amax(ao))[1][0]
    print(fileName, flagNames[flagIndex], int(
        round(np.amax(ao), 2)*100), "percent")
Example #36
def logistic_regression_derivative(x, y):
    n = len(y)
    return lambda w: ((sigmoid(
        (w * x).sum(axis=1)) - y).values.dot(x.values)) / n
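A hedged usage sketch combining logistic_regression (Example #34) and logistic_regression_derivative in a plain gradient-descent loop; the .values calls above imply pandas inputs, so x is assumed to be a DataFrame of features and y a Series of 0/1 labels, and the step size and iteration count are illustrative:

cost_and_grad = logistic_regression(x, y)        # w -> (cost, gradient)
grad = logistic_regression_derivative(x, y)      # w -> gradient

w = np.zeros(x.shape[1])
for _ in range(1000):
    w = w - 0.1 * grad(w)                        # fixed-step gradient descent
final_cost, _ = cost_and_grad(w)
print(final_cost)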
Example #37
        dist1.append(z)
    L=0
    for i in dist1:
        L += i
    D1.append(L) 

D1.reverse()
shift1.reverse()
print(D1)
print(shift1)

D=D1+D
shift=shift1+shift

print(D)
print(shift)

plt.plot(shift,D)
plt.show()

D=norm(D)
plt.plot(shift,D)  
plt.show()

for s in [0.2]:
    plt.plot(shift, sigmoid(D, s))
plt.show()
Example #38
 def __call__(self, graph, T):
     _, s = self.forward(graph, T)
     p = F.sigmoid(s)
     if p > 1 / 2: return 1
     else: return 0