def test(self):
        np.random.shuffle(self._test_data)
        test_X = self._test_data[:, :15]
        Y = self._test_data[:, -1]

        Y_real = []
        for y in Y:
            Y_real.append([y])

        Y_real = np.asarray(Y_real)

        inputLayer = test_X

        sum_sinapse0 = np.dot(inputLayer, self._weights0)
        hiddenLayer = libfunc.sigmoid(sum_sinapse0)

        sum_sinapse1 = np.dot(hiddenLayer, self._weights1)
        outputLayer = libfunc.sigmoid(sum_sinapse1)

        plt.clf()
        plt.plot(Y)
        plt.plot(outputLayer)
        plt.legend(['Real', 'Estimated'], loc='upper right')
        plt.grid()
        plt.savefig(
            os.path.dirname(__file__) + "\\graphs\\test\\" + "test_" +
            str(self._epochs) + str(self._tent) + ".png")
        # plt.show()

        return self.nash_sutcliffe(Y_real, outputLayer)
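The snippet above relies on a libfunc.sigmoid helper and a nash_sutcliffe method that are not shown. A minimal sketch of what they might look like (only the names follow the snippet; the bodies below are assumptions):

import numpy as np

def sigmoid(x):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-x))

def nash_sutcliffe(observed, simulated):
    # Nash-Sutcliffe efficiency: 1 minus the ratio of the residual sum of squares
    # to the variance of the observations (1.0 means a perfect fit)
    observed = np.asarray(observed, dtype=float)
    simulated = np.asarray(simulated, dtype=float)
    return 1.0 - np.sum((observed - simulated) ** 2) / np.sum((observed - observed.mean()) ** 2)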
Code Example #2
    def _forward_propagation(self, x):
        # layer input -> hidden
        z1 = np.dot(x, self.W_xh) + self.b_h
        a1 = af.sigmoid(z1)

        # layer hidden -> output
        z2 = np.dot(a1, self.W_hy) + self.b_y
        y_hat = af.sigmoid(z2)

        return z1, a1, z2, y_hat
Code Example #3
File: network.py Project: FrankieYin/MLStarter
 def _feedforward(self, a):
     """
     calculate the output of the network
     :param a: a numpy ndarray (n, 1)
     :return: a single number corresponding to the output of the network
     """
     for b, w in zip(self.biases, self.weights):
         # To compensate for the increase in number of neurons activated
         # we divide the weights and biases by the dropout size.
         if self.dropout_enabled:
             a = af.sigmoid(
                 np.dot(w / self.dropout_size, a) + b / self.dropout_size)
         else:
             a = af.sigmoid(np.dot(w, a) + b)
     return a
Code Example #4
def linear_activation_forward(A_prev, W, b, activation, output_size):
    #     """
    #     Implement the forward propagation for the LINEAR->ACTIVATION layer

    #     Arguments:
    #     A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
    #     W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    #     b -- bias vector, numpy array of shape (size of the current layer, 1)
    #     activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
    #     output_size - bit width of output data

    #     Returns:
    #     A -- the output of the activation function, also called the post-activation value
    #     cache -- a python tuple containing "linear_cache" and "activation_cache";
    #              stored for computing the backward pass efficiently
    #     """

    if activation == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z, output_size)

    elif activation == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z, output_size)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
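Several of these examples call linear_forward, sigmoid and relu without defining them. A minimal sketch under the usual convention, where each helper also returns a cache for the backward pass (the bodies are assumptions; the fixed-point output_size argument of the variant above is ignored here):

import numpy as np

def linear_forward(A_prev, W, b):
    # affine step of a layer; cache the inputs for backpropagation
    Z = np.dot(W, A_prev) + b
    return Z, (A_prev, W, b)

def sigmoid(Z):
    A = 1.0 / (1.0 + np.exp(-Z))
    return A, Z  # Z is kept as the activation cache

def relu(Z):
    A = np.maximum(0, Z)
    return A, Z  # Z is kept as the activation cache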
Code Example #5
def linear_activation_forward(a_prev,
                              w,
                              b,
                              activation="sigmoid",
                              keep_prob=1.0):

    if activation == "sigmoid":
        z, linear_cache = linear_forward(a_prev, w, b)
        a, activation_cache = sigmoid(z)
    elif activation == "ReLu":
        z, linear_cache = linear_forward(a_prev, w, b)
        a, activation_cache = ReLu(z)
    elif activation == "leaky_ReLu":
        z, linear_cache = linear_forward(a_prev, w, b)
        a, activation_cache = leaky_ReLu(z)

    # Dropout Regularization
    d = np.random.rand(a.shape[0], a.shape[1])
    d = np.where(d < keep_prob, 1, 0)  # if keep_prob == 1, dropout has no effect
    a = np.multiply(a, d)

    assert (a.shape == (w.shape[0], a_prev.shape[1]))
    # cache of layer l contains ((a[l-1], w[l], b[l]), z[l], d[l])
    cache = (linear_cache, activation_cache, d)
    return a, cache
Code Example #6
def predict(features, weights):
  '''
  Returns 1D array of probabilities
  that the class label == 1
  '''
  z = np.dot(features, weights)
  return sigmoid(z)
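A quick usage example of predict (the features, weights and the standalone sigmoid here are made up for illustration):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

features = np.array([[0.5, 1.2], [2.0, -0.3]])  # two examples, two features
weights = np.array([0.4, -0.7])
print(predict(features, weights))  # two probabilities, each in (0, 1)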
Code Example #7
def example(game, n=5):
    """
    train neural network

    :param game: callable that, given val_fun, returns a list of game data [{'x': [...], 'y': [...]}, ...]
    :param n: number of training rounds
    :return:
    """
    shape = [2, 4, 1]
    act_fun = sigmoid()
    fit_kwargs = {}
    max_epoch = 10**3

    model = FeedForwardNeuralNet(shape=shape, act_fun=act_fun)
    model.settings(name='model')
    model.load()

    algorithm = NAG()
    algorithm.compile(model=model)
    algorithm.settings(max_epoch=max_epoch, check_loss_freq=0)

    for _ in range(n):
        training_data = game(val_fun=model)
        algorithm.compile(data=training_data)
        algorithm.fit(**fit_kwargs)
        model.save()
Code Example #8
def process_hidden_layers(cache, weights_biases, num_layers):
    for layer in range(1, num_layers + 1):
        Z = np.dot(weights_biases['W' + str(layer)],
                   cache['A' + str(layer - 1)])
        A = af.sigmoid(Z)
        cache['Z' + str(layer)] = Z
        cache['A' + str(layer)] = A
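process_hidden_layers mutates cache in place and expects cache['A0'] to hold the input activations, with af.sigmoid in scope. A hedged usage sketch (the layer sizes and random initialisation below are assumptions):

import numpy as np

num_layers = 2
weights_biases = {
    'W1': np.random.randn(4, 3) * 0.01,  # hidden layer: 4 units, 3 input features
    'W2': np.random.randn(1, 4) * 0.01,  # output layer: 1 unit
}
cache = {'A0': np.random.randn(3, 5)}  # 3 features, 5 examples
process_hidden_layers(cache, weights_biases, num_layers)
print(cache['A2'].shape)  # (1, 5): one activation per example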
Code Example #9
def activation_forward(A_prev, W, b, activation_way):
    """

    :param A_prev: activations from the previous layer (or the input data)
    :param W: weights matrix of the current layer
    :param b: bias vector of the current layer
    :param activation_way: a text string indicating how this layer is activated: "sigmoid", "relu", ...
    :return:
    A -- the activation output of this layer
    cache -- a dictionary containing "linear_cache" and "activation_cache"
    """
    cache = dict()
    if activation_way == "relu":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
    elif activation_way == "sigmoid":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)
    elif activation_way == "tanh":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = tanh(Z)
    elif activation_way == "softmax":
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = softmax(Z)

    cache["linear_cache"] = linear_cache
    cache["activation_cache"] = activation_cache

    return A, cache
Code Example #10
 def model(self):
     self.N, self.D = self.X.shape
     self.N_t, self.D_t = self.Xtest.shape
     ones = np.array([[1] * self.N]).T
     ones_t = np.array([[1] * self.N_t]).T
     self.Xb = np.concatenate((ones, self.X), axis=1)
     self.X_t_b = np.concatenate((ones_t, self.Xtest), axis=1)
     self.w = np.random.randn(self.D + 1)
     z = self.Xb.dot(self.w)
     self.Yhat = sigmoid(z)
Code Example #11
    def test(self):
        test_X = self._test_data[:, :15]
        Y = self._test_data[:, -1]

        inputLayer = test_X

        sum_sinapse0 = np.dot(inputLayer, self._weights0)
        hiddenLayer = libfunc.sigmoid(sum_sinapse0)

        sum_sinapse1 = np.dot(hiddenLayer, self._weights1)
        outputLayer = libfunc.sigmoid(sum_sinapse1)
        print()
        plt.clf()
        plt.plot(Y)
        plt.plot(outputLayer)
        plt.legend(['Observed', 'Predicted'], loc='upper right')
        plt.grid()
        plt.savefig(
            os.path.dirname(__file__) + "\\graphs\\test\\" + "test_" +
            str(self._epochs) + ".png")
Code Example #12
    def train(self):
        for i in range(self.epochs * 100):
            if i % 100 == 0:
                print(cross_entropy(self.Y, self.Yhat))

            # update weights
            self.w += self.lr * (np.dot((self.Y - self.Yhat).T, self.Xb) -
                                 l2_regularization(self.l, self.w, True))
            self.Yhat = sigmoid(self.Xb.dot(self.w))

        print('Final w:', self.w)
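This training loop uses cross_entropy and l2_regularization helpers that are not shown. A minimal sketch of plausible definitions (the exact signatures, in particular the boolean flag selecting the gradient, are assumptions):

import numpy as np

def cross_entropy(Y, Yhat, eps=1e-12):
    # mean binary cross-entropy loss
    Yhat = np.clip(Yhat, eps, 1 - eps)
    return -np.mean(Y * np.log(Yhat) + (1 - Y) * np.log(1 - Yhat))

def l2_regularization(l, w, gradient=False):
    # L2 penalty 0.5 * l * ||w||^2, or its gradient l * w when gradient=True
    return l * w if gradient else 0.5 * l * np.dot(w, w)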
Code Example #13
    def validation(self):
        np.random.shuffle(self._valid_data)
        valid_X = self._valid_data[:, :7]
        Y = self._valid_data[:, -1]

        Y_real = []
        for y in Y:
            Y_real.append([y])

        Y_real = np.asarray(Y_real)

        inputLayer = valid_X

        sum_sinapse0 = np.dot(inputLayer, self._bestweights0)
        hiddenLayer = libfunc.sigmoid(sum_sinapse0) * self._bias

        sum_sinapse1 = np.dot(hiddenLayer, self._bestweights1)
        outputLayer = libfunc.sigmoid(sum_sinapse1) * self._bias
        rmse = np.average((Y_real - outputLayer)**2)
        return self.nash_sutcliffe(
            Y_real, outputLayer), rmse, self._bestweights0, self._bestweights1
Code Example #14
    def test(self, weights0, weights1):
        np.random.shuffle(self._test_data)
        test_X = self._test_data[:, :7]
        Y = self._test_data[:, -1]

        Y_real = []
        for y in Y:
            Y_real.append([y])

        Y_real = np.asarray(Y_real)

        inputLayer = test_X

        sum_sinapse0 = np.dot(inputLayer, weights0)
        hiddenLayer = libfunc.sigmoid(sum_sinapse0) * self._bias

        sum_sinapse1 = np.dot(hiddenLayer, weights1)
        outputLayer = libfunc.sigmoid(sum_sinapse1) * self._bias

        plt.clf()
        plt.plot(Y_real)
        plt.plot(outputLayer)
        plt.legend(['Real', 'Estimated'], loc='upper right')
        plt.grid()
        plt.savefig(
            os.path.dirname(__file__) + "\\graphs\\test\\" + "test.png")
        # plt.show()

        rmse = np.average((Y_real - outputLayer)**2)

        plt.clf()
        plt.plot((Y_real - outputLayer))
        plt.grid()
        plt.savefig(
            os.path.dirname(__file__) + "\\graphs\\test\\" +
            "test_erro_ep.png")

        print("NASH : " + str(self.nash_sutcliffe(Y_real, outputLayer)) +
              " - MSE: " + str(rmse))
Code Example #15
	def classify(self, dataVector):
		self.inputLayer = []
		self.hiddenLayer = []
		self.outputLayer = []
		self.inputLayer.append(dataVector)
		self.inputLayer[0].insert(0, 1.0)

		#multiply input layer and weights
		z1 = np.matrix(self.inputLayer) * np.matrix(self.weightMatrices[0])
		z1 = z1.tolist()
		#print z1
		
		#get hidden layer
		t = []
		for i in z1[0]:
			t.append(activefunc.sigmoid(i))
		self.hiddenLayer.append(t)
		#print self.hiddenLayer
		self.hiddenLayer[0].insert(0, 1.0)
		#print self.hiddenLayer
		#print len(self.hiddenLayer)
		#print len(self.hiddenLayer[0])

		#get output layer
		z2 = np.matrix(self.hiddenLayer) * np.matrix(self.weightMatrices[1])
		z2 = z2.tolist()
		#print z2
		#get outputlayer
		t = []
		for i in z2[0]:
			t.append(activefunc.sigmoid(i))
		self.outputLayer.append(t)
		for i in range(len(self.outputLayer[0])):
			if self.outputLayer[0][i] == max(self.outputLayer[0]):
				self.outputLayer[0][i] = 1
			else:
				self.outputLayer[0][i] = 0
		return self.outputLayer
Code Example #16
def middle_layer(x, w, b):
    """middle layer
    Hidden (middle) layer of a multilayer perceptron

    Args:
        x: input
        w: weight
        b: bias

    Returns:
        output of neuron
    """
    u = np.dot(x, w) + b
    return sigmoid(u)
Code Example #17
def linear_activation_forward(A_prev, W, b, activation):

    if activation == 'sigmoid':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == 'relu':
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert A.shape == (W.shape[0], A_prev.shape[1])
    cache = (linear_cache, activation_cache)

    return A, cache
Code Example #18
    def test(x, y, w1, w2):
        predictions = []
        scores = 0

        for i in range(len(x)):
            l0 = x[i]
            l1 = LeakyReLU(l0.dot(w1))
            l2 = sigmoid(l1.dot(w2))

            predictions.append([1, 0] if l2[0] > l2[-1] else [0, 1])
            if predictions[i][0] == y[i][0] and predictions[i][1] == y[i][1]:
                scores += 1

        return scores / len(y)
Code Example #19
def linear_activation_forward(A_prev, W, b, activation):

    if activation == "sigmoid":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = sigmoid(Z)

    elif activation == "relu":
        # Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
        Z, linear_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)

    return A, cache
Code Example #20
 def __feedforward(self):
     activation = self.__activation_func()[0]
     bias1 = np.ones((self.input.shape[0], 1))
     self.z1 = np.concatenate((np.dot(self.input, self.weights1), bias1),
                              axis=1)
     self.layer1 = activation(self.z1)
     self.layers = [self.layer1]
     self.zs = [self.z1]
     for weight in self.weights:
         prev_layer = self.layers[-1]
         bias = np.ones((prev_layer.shape[0], 1))
         z = np.concatenate((np.dot(prev_layer, weight), bias), axis=1)
         layer = activation(z)
         self.layers.append(layer)
         self.zs.append(z)
     self.z_last = np.dot(self.layers[-1], self.last_weights)
     self.output = self.z_last if self.objective == 'regression' else sigmoid(
         self.z_last)
     return self.output
Code Example #21
File: network.py Project: FrankieYin/MLStarter
    def _forwardprop(self, inputs):
        """
        :param inputs:
        a matrix X = [x1, ... , xm], where xi is a single training input, 784*1

        :return:
        (A, Z), where A is a list of matrices of activations of each training input;
        Z is the list of matrices of intermediate z values of each training input
        """
        activation = inputs
        activations = [activation]
        zs = []
        for w, b, r in zip(self.weights, self.biases, self.mask):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = r * af.sigmoid(z)
            activations.append(activation)

        return (activations, zs)
Code Example #22
def forwardprop_LogReg(X, parameters):
    """
    Argument:
    X -- matrix of shape (n_x, m) of inputs, each column is a training example
    parameters -- dictionary containing the parameters:
                    W -- weight vector of shape (n_x, 1)
                    b -- bias float 
    
    Returns:
    A -- an array of shape (1, m) containing the outputs for each example 
     
    """
    W = parameters['W']
    b = parameters['b']

    Z = np.dot(W.T, X) + b
    A = sigmoid(Z)

    return A
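A short usage example with the shapes from the docstring, assuming a sigmoid helper like the ones above is in scope (the sizes and random values are illustrative only):

import numpy as np

n_x, m = 3, 4  # 3 features, 4 examples
X = np.random.randn(n_x, m)
parameters = {'W': np.random.randn(n_x, 1), 'b': 0.0}
A = forwardprop_LogReg(X, parameters)
print(A.shape)  # (1, 4): one probability per example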
Code Example #23
    def __run_network(self,
                      registers: np.array,
                      debug: dict = None) -> np.array:
        def take_params(values, i):
            """Return the next pair of weights and biases after the
            starting index and the new starting index."""
            return values[i], values[i + 1], i + 2

        # Extract the 0th (i.e. P( x = 0 )) component from all registers.
        last_hidden_layer = np.array(registers[:, 0][None, ...],
                                     dtype=np.float32)

        # Propagate forward to hidden layers.
        idx = 0
        for i in range(self.context.num_hidden_layers):
            W, b, idx = take_params(self.context.network, idx)
            last_hidden_layer = relu(last_hidden_layer.dot(W) + b)

        controller_coefficients = []
        for i, gate in enumerate(self.context.gates):
            coeffs = []
            for j in range(gate.arity):
                W, b, idx = take_params(self.context.network, idx)
                coeff = softmax(last_hidden_layer.dot(W) + b)
                coeffs.append(coeff)
            controller_coefficients.append(coeffs)

        # Forward propagate to new register value coefficients.
        for i in range(self.context.num_regs):
            W, b, idx = take_params(self.context.network, idx)
            coeff = softmax(last_hidden_layer.dot(W) + b)
            controller_coefficients.append(coeff)

        # Forward propagate to generate willingness to complete.
        W, b, idx = take_params(self.context.network, idx)
        complete = sigmoid(last_hidden_layer.dot(W) + b)

        if debug is not None:
            debug.fi = np.around(complete.sum(), 3)

        return controller_coefficients, complete
Code Example #24
File: Perceptron.py Project: Fsn9/Perceptron_2D
 def feed_forward(self, x):
     if self.activation_function == 'sigmoid':
         # z = weighted sum
         z = np.dot(self.weights, x) + self.bias
         # a = activation function
         a = af.sigmoid(z)
         # y = output
         y = a
     elif self.activation_function == 'heavyside':
         # z = weighted sum
         z = np.dot(self.weights, x) + self.bias
         # a = activation function
         a = af.heavyside(z)
         # y = output
         y = a
     elif self.activation_function == 'relu':
         # z = weighted sum
         z = np.dot(self.weights, x) + self.bias
         # a = activation function
         a = af.relu(z)
         # y = output
         y = a
     elif self.activation_function == 'leaky_relu':
         # z = weighted sum
         z = np.dot(self.weights, x) + self.bias
         # a = activation function
         a = af.leaky_relu(z)
         # y = output
         y = a
     elif self.activation_function == 'tanh':
         # z = weighted sum
         z = np.dot(self.weights, x) + self.bias
         # a = activation function
         a = af.tanh(z)
         # y = output
         y = a
     return y, z
Code Example #25
	def activationFunction(self, data):
		return af.sigmoid(data)
Code Example #26
	def train(self):
		#generate random weights
		self.generateWeights(self.weightMatrices, neuronsPerHiddenLayer - 1, neuronsInputLayer)
		self.generateWeights(self.weightMatrices, neuronsOutputLayer, neuronsPerHiddenLayer)
		#print self.weightMatrices[0]
		
		iterator = 0
		oldCost = 0.0
		cost = 0.0
		learningRatio = maxLearingRatio
		while iterator < maxLoop:
			#loop for each vector
			for vector in self.trainSet:
				#forward propagation
				self.inputLayer.append(vector[1:])
				self.inputLayer[0].insert(0, 1.0)
				#print self.inputLayer

				#multiply input layer and weights
				z1 = np.matrix(self.inputLayer) * np.matrix(self.weightMatrices[0])
				z1 = z1.tolist()
				#print z1
				
				#get hidden layer
				t = []
				for i in z1[0]:
					t.append(activefunc.sigmoid(i))
				self.hiddenLayer.append(t)
				#print self.hiddenLayer
				self.hiddenLayer[0].insert(0, 1.0)
				#print self.hiddenLayer
				#print len(self.hiddenLayer)
				#print len(self.hiddenLayer[0])

				#get output layer
				z2 = np.matrix(self.hiddenLayer) * np.matrix(self.weightMatrices[1])
				z2 = z2.tolist()
				#print z2

				#get outputlayer
				t = []
				for i in z2[0]:
					t.append(activefunc.sigmoid(i))
				self.outputLayer.append(t)
				#print self.outputLayer
				
				#get cost function
				cost = self.costFunction(self.outputLayer)
				#print cost
				if np.abs(cost - oldCost) <= limit:
					break
				#print np.abs(cost - oldCost)
				#print cost

				#backward propagation
				delta = []
				t1 = np.zeros((neuronsInLayers[0], neuronsInLayers[1] - 1)).tolist()
				t2 = np.zeros((neuronsInLayers[1], neuronsInLayers[2])).tolist()
				delta.append(t1)
				delta.append(t2)
				#print delta

				#get error value for output layer
				sigma3 = []
				t = []
				for i in range(neuronsInLayers[2]):
					t.append(self.outputLayer[0][i] - vector[0][i])
				sigma3.append(t)
				#print sigma3
				
				#get error value for hidden layer
				sigma2 = []
				sigma2 = np.matrix(sigma3) * np.matrix(np.transpose(self.weightMatrices[1]))
				sigma2 = sigma2.tolist()
				#print sigma2
				derivative = []
				t = []
				for i in self.hiddenLayer[0]:
					t.append(activefunc.derivativeSigmoid(i))
				derivative.append(t)
				#print derivative
				t = []
				for a,b in zip(sigma2[0], derivative[0]):
					t.append(a * b)
				sigma2 = []
				sigma2.append(t[1:])
				#print sigma2

				#calculate delta[0]
				for i in range(len(delta[0])):
					for j in range(len(delta[0][i])):
						delta[0][i][j] += self.inputLayer[0][i] * sigma2[0][j]
				#print delta[0]

				#calculate delta[1]
				for i in range(len(delta[1])):
					for j in range(len(delta[1][i])):
						delta[1][i][j] += self.hiddenLayer[0][i] * sigma3[0][j]
				#print delta[1]

				#derivative for cost function
				costDerivative = []
				costDerivative.append(self.derivativeCostFunction(delta, 0))
				costDerivative.append(self.derivativeCostFunction(delta, 1))
				#print costDerivative[0]

				#update weights
				for k in range(len(self.weightMatrices)):
					for i in range(len(self.weightMatrices[k])):
						for j in range(len(self.weightMatrices[k][i])):
							self.weightMatrices[k][i][j] = self.weightMatrices[k][i][j] - learningRatio * costDerivative[k][i][j]
				#print len(self.weightMatrices[0])

				self.inputLayer = []
				self.hiddenLayer = []
				self.outputLayer = []
				oldCost = cost
				learningRatio = learningRatio / (1.0 + (iterator / maxLoop))

			if np.abs(cost - oldCost) <= limit:
				break
			iterator += 1
		print "Error: %f" %cost
Code Example #27
File: units.py Project: daleloogn/singerID-morb-RBM
 def mean_field_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self] - vmap[self.flipped_units])
Code Example #28
File: units.py Project: daleloogn/singerID-morb-RBM
 def success_probability_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self])
Code Example #29
File: units.py Project: daleloogn/singerID-morb-RBM
 def sample_from_activation(self, vmap):
     p = activation_functions.sigmoid(vmap[self] - vmap[self.flipped_units])
     return samplers.bernoulli(p)
Code Example #30
File: units.py Project: daleloogn/singerID-morb-RBM
 def mean_field_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self])
Code Example #31
 def sample_from_activation(self, vmap):
     p = activation_functions.sigmoid(vmap[self] - vmap[self.flipped_units])
     return samplers.bernoulli(p)
Code Example #32
 def success_probability_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self])
Code Example #33
    def train(self):
        rmseArray = []
        epoch = self._epochs
        # stopping condition: number of epochs
        while epoch > 0:

            np.random.shuffle(self._train_data)

            train_X = self._train_data[:, :15]
            Y = self._train_data[:, -1]
            train_Y = []
            for y in Y:
                train_Y.append([y])

            train_Y = np.asarray(train_Y)
            print("Epoch: " + str(epoch))

            inputLayer = train_X

            # Input layer
            sum_sinapse0 = np.dot(inputLayer, self._weights0)
            hiddenLayer = libfunc.sigmoid(sum_sinapse0)

            sum_sinapse1 = np.dot(hiddenLayer, self._weights1)
            outputLayer = libfunc.sigmoid(sum_sinapse1)

            outputLayerError = train_Y - outputLayer

            # Stopping condition
            rmse = mean_squared_error(train_Y, outputLayer)
            rmseArray.append(np.mean(outputLayerError))
            if rmse < self._rmse_min and self._stop_params == "mse":
                break

            derivedOutput = deriFunc.derived_sigmoid(outputLayer)
            deltaOutput = outputLayerError * derivedOutput

            weight1T = self._weights1.T
            deltaOutXWeight = deltaOutput.dot(weight1T)
            deltaOutputHidden = deltaOutXWeight * deriFunc.derived_sigmoid(
                hiddenLayer)

            hiddenLayerT = hiddenLayer.T
            weight1_new = hiddenLayerT.dot(deltaOutput)
            self._weights1 = (self._weights1 * self._momentum) + (
                weight1_new * self._learning_rate)

            inputLayerT = inputLayer.T
            weight0_new = inputLayerT.dot(deltaOutputHidden)
            self._weights0 = (self._weights0 * self._momentum) + (
                weight0_new * self._learning_rate)

            # implement cross-validation
            cross_stop = False

            if cross_stop and self._stop_params == "crossvalidation":
                break

            epoch -= 1
            # end of all epochs
            np.savetxt("RMSE.txt", rmseArray, fmt='%1.6f')
        plt.clf()
        plt.plot(rmseArray)
        plt.grid()
        plt.show()
Code Example #34
 def mean_field_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self] -
                                         vmap[self.flipped_units])
Code Example #35
def derived_sigmoid(x):
    return func.sigmoid(x) * (1 - func.sigmoid(x))
Code Example #36
 def test_sigmoid(self):
     result = af.sigmoid(np.array([[1, 2], [3, 4]]))
     expected_result = np.array([[0.731058, 0.880797], [0.952574, 0.982013]])
     difference = result - expected_result
     self.assertTrue(np.linalg.norm(difference) < 1e-4)
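The test above passes for the standard logistic function; a minimal af.sigmoid that would satisfy it (the module name af follows the snippets, the body is simply the usual definition):

import numpy as np

def sigmoid(x):
    # logistic function; sigmoid(1) ~ 0.731058, sigmoid(4) ~ 0.982013
    return 1.0 / (1.0 + np.exp(-x))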
Code Example #37
 def mean_field_from_activation(self, vmap):
     return activation_functions.sigmoid(vmap[self])