Example #1
    def train(self):
        s = self

        inp = []   # training inputs
        out = []   # one-hot targets
        for tr in s.trainset:
            inp.append(s.trainset[tr][1:])
            lst = [0] * 4
            lst[s.trainset[tr][0] // 90] = 1  # integer division: labels are multiples of 90
            out.append(lst)

        # initialize the hidden layer (numHidden neurons) and the output layer
        np.random.seed(int(time.time()))
        W1 = np.random.randn(s.train_dim, s.numHidden) / np.sqrt(s.train_dim)
        b1 = np.zeros((1, s.numHidden))

        W2 = np.random.randn(s.numHidden, s.numOutput) / np.sqrt(s.numHidden)
        b2 = np.zeros((1, s.numOutput))
        '''
        numWeights = s.train_dim
        last = 0
        for i in range(s.numLayers):
            s.model['w'+str(i)] = np.random.randn(numWeights, s.numHidden) / np.sqrt(numWeights)
            s.model['b'+str(i)] = np.zeros((1, s.numHidden))
            numWeights = s.numHidden
            last = i
        last += 1
        s.model['w'+str(last)] = np.random.randn(s.numHidden, s.numOutput) / np.sqrt(s.numHidden)
        s.model['b'+str(last)] = np.zeros((1, s.numOutput))
        '''
        inpa = np.array(inp)
        outa = np.array(out)
        for i in range(s.numPasses):
            # Forward propagation

            z1 = inpa.dot(W1) + b1
            a1 = s.sigmoid(z1)
            z2 = a1.dot(W2) + b2
            a2 = s.sigmoid(z2)  # use the class helper; NumPy has no np.sigmoid
            probs = a2

            # Backpropagation
            delta3 = np.multiply(-(outa - probs), s.sigmoidprime(z2))
            # delta3[range(len(inpa)), outa] -= 1
            dW2 = (a1.T).dot(delta3)
            db2 = np.sum(delta3, axis=0, keepdims=True)
            delta2 = delta3.dot(W2.T) * s.sigmoidprime(z1)
            dW1 = np.dot(inpa.T, delta2)
            db1 = np.sum(delta2, axis=0)

            # Add regularization terms (b1 and b2 don't have regularization terms)
            dW2 += s.reg * W2
            dW1 += s.reg * W1

            # Gradient descent parameter update
            W1 += -s.epsilon * dW1
            b1 += -s.epsilon * db1
            W2 += -s.epsilon * dW2
            b2 += -s.epsilon * db2

            s.model = {'w1': W1, 'b1': b1, 'w2': W2, 'b2': b2}
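
The method relies on s.sigmoid and s.sigmoidprime, which the snippet never defines. A minimal sketch of what those helpers presumably look like (the class name and method placement are assumptions; NumPy itself provides no sigmoid):

import numpy as np

class Net:
    # ... training code from Example #1 ...

    def sigmoid(self, z):
        # elementwise logistic function
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoidprime(self, z):
        # derivative of the logistic: sigma(z) * (1 - sigma(z))
        sig = self.sigmoid(z)
        return sig * (1.0 - sig)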
Example #2
def propagate(W, X, Y, b):
    m = X.shape[0]
    # logistic activation on the affine map (NumPy has no np.sigmoid)
    Y_cap = 1.0 / (1.0 + np.exp(-(np.dot(X, W) + b)))
    # cross-entropy cost; the conventional normalization is 1/m
    cost = (-1 / m) * np.sum(
        np.dot(Y.T, np.log(Y_cap)) + np.dot((1 - Y).T, np.log(1 - Y_cap)))
    #grad = (1/m)*np.dot(X.T, Y_cap - Y)
    return cost
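
The commented-out grad line hints at the intended training step. A minimal gradient-descent driver built from the same quantities (the function name, learning rate, and iteration count are illustrative assumptions):

import numpy as np

def train_logreg(X, Y, lr=0.1, n_iters=1000):
    # X: (m, n) inputs, Y: (m, 1) binary labels
    m, n = X.shape
    W = np.zeros((n, 1))
    b = 0.0
    for _ in range(n_iters):
        Y_cap = 1.0 / (1.0 + np.exp(-(np.dot(X, W) + b)))
        W -= lr * (1 / m) * np.dot(X.T, Y_cap - Y)  # gradient from the commented line
        b -= lr * (1 / m) * np.sum(Y_cap - Y)
    return W, b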
Example #3
    def feedforward(self, a):
        """
        Return the output of the network if ``a`` is input.
        """
        for b, w in zip(self.biases, self.weights):
            a = 1.0 / (1.0 + np.exp(-(np.dot(w, a) + b)))  # logistic activation

        return a
Example #4
    def calc_output(self):
        '''
        Compute the node's output according to Equation 1.
        '''
        # requires `from functools import reduce` on Python 3
        output = reduce(
            lambda ret, conn: ret + conn.upstream_node.output * conn.weight,
            self.upstream, 0)
        self.output = 1.0 / (1.0 + np.exp(-output))  # logistic activation
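
The reduce call is just a weighted sum of the upstream outputs; an equivalent helper written with the more idiomatic sum and a generator expression (the function name is an assumption):

def weighted_input(upstream):
    # same weighted sum as the reduce in calc_output
    return sum(conn.upstream_node.output * conn.weight
               for conn in upstream)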
Example #5
def aeneunet(input, num_layers, num_weights):
    num_biases = num_layers
    # squash the raw inputs, then prepend the bias unit
    x = 1.0 / (1.0 + np.exp(-np.asarray(input, dtype=float)))
    x = np.append(1, x)
    theta = np.zeros(num_weights + 1)  # initialize all weights to zeros: L1 is then 0, so a1 is always 0.5
    L1 = x.dot(np.transpose(theta))
    a1 = 1.0 / (1.0 + np.exp(-L1))
    return a1
Example #6
    def forward_propagation(self, scatter_plot_values):
        iL = scatter_plot_values.dot(self.syn1) + self.back_prop_1  # map inputs through the first layer
        if self.activation_choice == 0:
            self.oL = np.tanh(iL)
            syn2_shift = self.oL.dot(self.syn2) + self.back_prop_2
            return np.exp(syn2_shift)
        elif self.activation_choice == 1:
            return 1.0 / (1.0 + np.exp(-iL))  # logistic activation
        elif self.activation_choice == 2:
            return np.sin(iL)
Example #7
def prob_X2(data):
    logits = data[:, :, 0]

    for w in weights[:-1]:  # `weights` is defined outside this function
        logits = logits * w
        logits = 1.0 / (1.0 + np.exp(-logits))  # logistic squash between layers

    logits *= weights[-1]

    return naive_softmax(logits)
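
naive_softmax is not defined in the snippet. A minimal sketch consistent with the name, assuming a row-wise softmax without the usual max-subtraction stabilization (hence "naive"):

import numpy as np

def naive_softmax(logits):
    # exponentiate and normalize along the last axis
    exps = np.exp(logits)
    return exps / np.sum(exps, axis=-1, keepdims=True)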
Example #8
    def __init__(self,
                 size,
                 intelligence,
                 social,
                 digestion,
                 strength,
                 speed,
                 dexterity,
                 sex_drive,
                 mutation_factor=0.04,
                 hunger=0.5,
                 thirst=0.5,
                 health=1):

        self.gender = rnd.choice(["male", "female"])
        self.mutation_factor = mutation_factor

        self.max_size = rnd.normalvariate(size, size * mutation_factor)
        self.size = self.max_size / 10

        self.max_intelligence = rnd.normalvariate(
            intelligence, intelligence * mutation_factor)
        self.intelligence = self.max_intelligence / 10

        self.max_strength = rnd.normalvariate(strength,
                                              strength * mutation_factor)
        self.strength = self.max_strength / 10

        self.max_speed = rnd.normalvariate(speed, speed * mutation_factor)
        self.speed = self.max_speed / 10

        self.max_dexterity = rnd.normalvariate(dexterity,
                                               dexterity * mutation_factor)
        self.dexterity = self.max_dexterity / 10

        social = math.tan(social)
        social = rnd.normalvariate(social, social * mutation_factor)
        self.social = math.tanh(social)

        digestion = math.tan(digestion)
        digestion = rnd.normalvariate(digestion, digestion * mutation_factor)
        self.digestion = math.tanh(digestion)

        self.hunger = hunger
        self.thirst = thirst
        self.health = health
        self.fitness = min(self.hunger, self.thirst, self.health)  # builtin min; np.min takes a single array, not separate arguments

        sex_drive = math.tan(sex_drive * 2 - 1)
        sex_drive = rnd.normalvariate(sex_drive, sex_drive * mutation_factor)
        self.max_sex_drive = 1.0 / (1.0 + math.exp(-sex_drive))  # logistic squash; NumPy has no np.sigmoid
        self.sex_drive = 0
Example #9
def sigmoid(pre_activated):
    # NumPy has no np.sigmoid; compute the logistic function directly
    return 1.0 / (1.0 + np.exp(-pre_activated))
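
For a library version, scipy.special.expit is the standard vectorized sigmoid and handles large-magnitude inputs without overflow warnings:

import numpy as np
from scipy.special import expit

x = np.array([-10.0, 0.0, 10.0])
print(expit(x))  # [4.53978687e-05 5.00000000e-01 9.99954602e-01]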
Example #10
import numpy as np
import matplotlib.pyplot as plt

xdate = np.linspace(-5, 5)

plt.figure()

# plot the logistic curve (NumPy has no np.sigmoid)
plt.plot(xdate, 1.0 / (1.0 + np.exp(-xdate)), 'r')

plt.show()
Example #11
def fwd(x, W_1, W_2, b_1, b_2):
    z_1 = x * W_1 + b_1
    a_1 = np.tanh(z_1)
    z_2 = a_1 * W_2 + b_2
    a_2 = 1.0 / (1.0 + np.exp(-z_2))  # logistic output layer
    return z_1, a_1, z_2, a_2
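
With scalar weights the elementwise * in fwd works as written; a quick usage sketch, assuming fwd from the example above is in scope (all values are illustrative):

x = 0.5
W_1, b_1 = 1.2, -0.3
W_2, b_2 = -0.8, 0.1
z_1, a_1, z_2, a_2 = fwd(x, W_1, W_2, b_1, b_2)
print(a_2)  # logistic output, strictly between 0 and 1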
Example #12
def sigmoid_py(x, parameter=None, weight=None):
    # logistic function (NumPy has no np.sigmoid)
    y = (1.0 / (1.0 + np.exp(-x))).astype(np.float32)
    if not hasattr(y, "__len__"):
        y = [y]
    return y
Example #13
def sigmoid_F(X):
    # forward pass for sigmoid activation
    out = 1.0 / (1.0 + np.exp(-X))  # logistic forward pass
    for_backprop = out
    return out, for_backprop
Example #14
    def forward(self, x):
        for b, w in zip(self.biases, self.weights):
            x = 1.0 / (1.0 + np.exp(-(np.dot(w, x) + b)))  # logistic activation
        return x
Example #15
def vectorized_sigmoid(z):
    # elementwise logistic (NumPy has no np.sigmoid)
    return 1.0 / (1.0 + np.exp(-z))
Example #16
import numpy as np
import timeit
from tfrbm.bbrbm import BBRBM

bm = BBRBM(n_visible=76, n_hidden=76)

#dataset = np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,0]])
dataset = (np.random.rand(76) > 0.5).astype(int).reshape(1, -1)  # int() cannot cast an array; threshold, then cast elementwise
#import sys
#i = int(sys.argv[1])

#x = dataset[i:i+1].copy()
x = np.array(dataset[0][:76]).reshape(1, -1)
while not np.all(np.logical_and(bm.reconstruct(x), x)):

    bm.fit(dataset, n_epoches=1000)
    print(1.0 / (1.0 + np.exp(-bm.reconstruct(x))))  # logistic (NumPy has no np.sigmoid)
    print(x)
weights = bm.get_weights()
w = weights[0].tolist()
b = np.concatenate([weights[1], weights[2]]).tolist()
print(w)
print()
print(b)
print()
print(v)  # note: `v` is never defined in this snippet
Example #17
        ux = np.reshape(ux, -1)
        uy = np.reshape(uy, -1)
        if sampleMeshIndices is not None:
            ux = np.take(ux, sampleMeshIndices, axis=0)
            uy = np.take(uy, sampleMeshIndices, axis=0)

        return np.reshape(ux, -1), np.reshape(uy, -1)

    def set_inicond(self, case=""):
        dx = np.min(self.geom.dX)
        L = np.min(self.geom.L)
        delta = 0.005 * L
        #self.front = lambda x: 1/(1 + np.exp(-x/delta))
        self.front = lambda x: 0.5 * (1 + np.tanh(x / delta))
        # logistic front (NumPy has no np.sigmoid); the tanh front above equals sigmoid(2x/delta)
        self.forward = lambda x: 1 / (1 + np.exp(-x / delta))
        if self.dim == 2:
            if case == "pacman":
                [X, Y] = self.geom.Xgrid
                phi0 = np.sqrt((X - 0.4 * L)**2 + (Y - 0.5 * L)**2) - 0.2 * L
                q0 = self.front(phi0)
                return 1 - np.reshape(q0, -1)
            if case == "bunsen_flame":
                return np.reshape((1 - self.penalisation.weights), -1)
            else:
                return np.reshape(np.zeros(self.geom.N), -1)
        else:
            if case == "reaction1D":
                self.front = lambda x: 0.5 * (1 - np.tanh(x / 2))
                return self.front(