Example #1
    def predict(self, x):  # output
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x,W1) + b1
        z1 = softmax(a1)
        a2 = np.dot(z1,W2) + b2
        y = softmax(a2)

        return y
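Every example on this page calls a softmax helper defined elsewhere in its repository (sometimes as functions.softmax, f.softmax, or fun.softmax). As a point of reference, a minimal, numerically stable sketch is given below; it is an assumption about what such a helper typically does, not the exact code any of these projects ship. Conventions differ across examples (some apply softmax row-wise to a batch, others to a single vector), so this sketch normalizes along the last axis.

import numpy as np

def softmax(x):
    # Hypothetical reference implementation: shift by the maximum along the
    # last axis for numerical stability, then normalize the exponentials so
    # they sum to 1 along that axis.
    x = np.asarray(x, dtype=float)
    shifted = x - x.max(axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=-1, keepdims=True)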
Example #2
def detect(im, param_vals):
    """
    Detect number plates in an image.

    :param im:
        Image to detect number plates in.

    :param param_vals:
        Model parameters to use. These are the parameters output by the `train`
        module.

    :returns:
        a 7x36 matrix giving the probability distribution over characters for each of the 7 plate positions.

    """

    # Load the model which detects number plates
    x, y, params = deep_net.final_training_model()

    # Run the model on the input image.
    with tf.Session(config=tf.ConfigProto()) as sess:
        feed_dict = {x: numpy.stack([im])}
        feed_dict.update(dict(zip(params, param_vals)))
        y_val = sess.run(y, feed_dict=feed_dict)

    # Find the probabilities of each letter being present
    letter_probs = y_val.reshape(7, len(functions.CHARS))
    letter_probs = functions.softmax(letter_probs)

    return letter_probs
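The 7x36 matrix returned by detect can be decoded into a plate string by taking the most probable character at each of the 7 positions. A hypothetical usage sketch, assuming functions.CHARS is the 36-character alphabet the model was trained on (this alphabet is an assumption, not shown in the snippet):

# Hypothetical decoding step; CHARS is assumed to be an indexable
# 36-character alphabet such as "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ".
letter_probs = detect(im, param_vals)
plate = "".join(functions.CHARS[i] for i in letter_probs.argmax(axis=1))
print(plate)  # best-guess 7-character plate string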
    def forward_propagation(self, X):
        """
        Compute forward propagation for (L-1) Sigmoid layers followed by a Softmax output layer.
        """
        store = {}

        A = X.T
        for l in range(self.L - 1):
            Z = self.parameters[f"W{l+1}"].dot(A) + self.parameters[f"b{l+1}"]
            A = sigmoid(Z)
            # inverted dropout: keep each unit with probability (1 - drop_prob),
            # then rescale so the expected activation is unchanged
            drop_prob = self.dropout_probs[l]
            D = np.random.rand(A.shape[0], A.shape[1])
            D = D >= drop_prob
            A = (A * D) / (1 - drop_prob)
            store[f"A{l+1}"] = A
            store[f"W{l+1}"] = self.parameters[f"W{l+1}"]
            store[f"Z{l+1}"] = Z
            store[f"D{l+1}"] = D

        Z = self.parameters[f"W{self.L}"].dot(
            A) + self.parameters[f"b{self.L}"]
        A = softmax(Z)
        store[f"A{self.L}"] = A
        store[f"W{self.L}"] = self.parameters[f"W{self.L}"]
        store[f"Z{self.L}"] = Z

        return A, store
    def gradient(self, x, t):
        """Calculate gradient to weight params using backpropagation.
           This can calculate faster than numerical gradient function.

        Args:
            x (numpy.ndarray): image data which mean input to NN
            t (numpy.ndarray): labels

        Return:
            dictionary: dictionary of gradient to each param.
        """

        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']
        grads = {}

        batch_num = x.shape[0]

        # forward
        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        # backward
        dy = (y - t) / batch_num
        grads['W2'] = np.dot(z1.T, dy)
        grads['b2'] = np.sum(dy, axis=0)

        dz1 = np.dot(dy, W2.T)
        da1 = sigmoid_grad(a1) * dz1
        grads['W1'] = np.dot(x.T, da1)
        grads['b1'] = np.sum(da1, axis=0)

        return grads
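The forward and backward passes above rely on sigmoid and sigmoid_grad helpers. A plausible sketch of those helpers under the conventional definitions is given below; this is an assumption, and the actual module behind the snippet may differ.

import numpy as np

def sigmoid(x):
    # logistic function
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_grad(x):
    # derivative of the logistic function with respect to its input
    s = sigmoid(x)
    return s * (1.0 - s)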
Example #5
    def forward(self, input):
        input_length = len(input)
        # stores each state from initial to every state after an input is inserted
        states = np.zeros((input_length + 1, self.hidden_size, 1))
        # stores output of the network after each input is computed
        outputs = np.zeros((input_length, self.input_size, 1))

        for time_t in range(0, input_length):
            prev_state = states[time_t]

            current_input = np.zeros((self.input_size, 1))
            current_input[input[time_t]] = 1

            current_state = np.tanh(
                self.w_first.dot(current_input) +
                self.w_reverse.dot(prev_state))

            current_output = softmax(self.w_second.dot(current_state))

            states[time_t + 1] = current_state
            outputs[time_t] = current_output
        # based on the calculated outputs we need to take the most probable output as prediction
        predictions = [np.argmax(o) for o in outputs]

        return states, outputs, predictions
Example #6
 def fx(self, A, B):
     '''A is the predicted output (logits); B is the one-hot target output.'''
     B = B.argmax(axis=1)
     m = B.shape[0]
     p = softmax(A)
     log_likelihood = -np.log(p[range(m), B])
     return np.sum(log_likelihood) / m
Example #7
def forward_propagation(W, b, x, n_hl, ac):
    h, a = [[]], [[]]

    _h, _a = [], []

    for i in range(1, n_hl + 1):
        if i == 1:
            _a = np.dot(W[i], x) + b[i]
        else:
            _a = np.dot(W[i], h[i - 1]) + b[i]

        if ac == "sig":
            _h = functions.logistic(_a)

        elif ac == "tanh":
            _h = functions.tanh(_a)

        elif ac == "relu":
            _h = functions.ReLU(_a)

        a.append(_a)
        h.append(_h)

    _a = np.dot(W[n_hl + 1], h[n_hl]) + b[n_hl + 1]
    _y = functions.softmax(_a - max(_a))

    a.append(_a)
    h.append(_y)

    return h, a
Example #8
def forward(conf, X_batch, params, is_training):
    """
    Forward propagation through fully connected network.

    X_batch:
        (channels * height * width, batch_size)
    """
    n = conf["layer_dimensions"]
    L = len(n) - 1

    # Saves the input
    A = X_batch
    features = {}
    features["A_0"] = A

    # Loop over each layer in network
    for l in range(1, L + 1):
        A_prev = A.copy()
        Z = np.dot(params["W_" + str(l)].T, A_prev) + params["b_" + str(l)]

        # Calculate the activation (ReLU for hidden layers, softmax for the output layer)
        if l < L:
            A = activation(Z.copy(), "relu")
        else:
            A = softmax(Z.copy())
        if is_training:
            # Save activations if training
            features["Z_" + str(l)] = Z.copy()
            features["A_" + str(l)] = A.copy()

    # Y_proposed is the probabilities returned by passing
    # activations through the softmax function.
    Y_proposed = A
    return Y_proposed, features
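For this forward pass to be consistent with np.dot(params["W_" + str(l)].T, A_prev), each W_l must have shape (n[l-1], n[l]) and activations must be stored column-wise with shape (n[l], batch_size). A hypothetical parameter-initialization sketch under that assumption (not part of the original module):

import numpy as np

def init_params(conf, seed=0):
    # Hypothetical initializer matching the shapes forward() assumes:
    # W_l is (n[l-1], n[l]) so that W_l.T @ A_prev yields (n[l], batch_size);
    # b_l is (n[l], 1) and broadcasts over the batch dimension.
    rng = np.random.default_rng(seed)
    n = conf["layer_dimensions"]
    params = {}
    for l in range(1, len(n)):
        params["W_" + str(l)] = rng.normal(0.0, np.sqrt(2.0 / n[l - 1]),
                                           size=(n[l - 1], n[l]))
        params["b_" + str(l)] = np.zeros((n[l], 1))
    return params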
Example #9
    def predict(self, x):
        # print("func predict start")

        W1, W2, W3 = self.params['W1'], self.params['W2'], self.params['W3']
        b1, b2, b3 = self.params['b1'], self.params['b2'], self.params['b3']
        # forward propagation

        # x is the input layer

        # level 1
        # a1 = x dot W1 + b1
        a1 = np.dot(x, W1) + b1
        # z1 = activation function applied to a1
        z1 = sigmoid(a1)

        # level 2
        # a2 = z1 dot W2 + b2
        a2 = np.dot(z1, W2) + b2
        # z2 = activation function applied to a2
        z2 = sigmoid(a2)

        # output layer
        # a3 = z2 dot W3 + b3
        a3 = np.dot(z2, W3) + b3
        # y = softmax function applied to a3
        y = softmax(a3)

        # print("func predict end")
        return y
Example #10
 def test_softmax_1(self):
     test_data = np.array([[0.3, 2.9, 4.0], [-0.5, -0.001, 0.001]])
     expected = (2, 3)
     logits = functions.softmax(test_data)
     print(logits)
     actual = logits.shape
     self.assertTrue(expected == actual)
 def forward(self, x):
     '''
     Performs a forward pass through the network.
     Params:
         x - np.array, the input vector
     '''
     for i in range(len(self.weights)):
         x = self.add_bias(x)
         self.a[i] = x
         weights = self.weights[i]
         act_fun = self.act_funs[i]
         z = np.dot(weights, x)
         self.z[i] = z
         if act_fun == 'sigmoid':
             a = fun.sigmoid(z)
         elif act_fun == 'ReLU':
             a = fun.ReLU(z)
         elif act_fun == 'tanh':
             a = fun.tanh(z)
         elif act_fun == 'softmax':
             a = fun.softmax(z)
         elif act_fun == 'ELU':
             a = fun.ELU(z)
         x = a
     return x
Example #12
def predict(x, net):  # use sample_pickle to compute and return the recognition result (0-9)
    a1 = np.dot(x, net['W1']) + net['b1']
    z1 = sigmoid(a1)
    a2 = np.dot(z1, net['W2']) + net['b2']
    z2 = sigmoid(a2)
    a3 = np.dot(z2, net['W3']) + net['b3']
    z3 = softmax(a3)
    return z3
Example #13
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    x1 = sigmoid(np.dot(x, W1) + b1)  # store each in a variable, then dot with the weights, add the bias, and apply sigmoid to move to the next layer
    x2 = sigmoid(np.dot(x1, W2) + b2)
    x3 = np.dot(x2, W3) + b3
    y = softmax(x3)
    return y
Example #14
 def fprop(self, x):      
     
     h_a = x.dot(self.W['w1'].T) + self.W['b1']
     h_s = reLU(h_a)
     o_a = h_s.dot(self.W['w2'].T) + self.W['b2']
     o_s = softmax(o_a)
       
     return dict([('h_a',h_a),('h_s',h_s),('o_a',o_a),('o_s',o_s)])
Example #15
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    x1 = sigmoid(np.dot(x, W1) + b1)
    x2 = sigmoid(np.dot(x1, W2) + b2)
    x3 = np.dot(x2, W3) + b3
    y = softmax(x3)
    return y
Example #16
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    l = sigmoid(np.dot(x, W1) + b1)
    m = sigmoid(np.dot(l, W2) + b2)
    y = softmax(np.dot(m, W3) + b3)

    return y
Example #17
    def test_softmax(self):
        test_data = np.array([[0.3, 2.9, 4.0], [-0.5, -0.001, 0.001]])
        expected = np.array([[1., 1.]])

        actual = functions.softmax(test_data)
        print(actual)
        actual = np.sum(actual, axis=1)
        print(actual)
        self.assertTrue((expected == actual).all())
Example #18
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
Example #19
    def forward(self, x: np.ndarray, t: np.ndarray) -> float:
        self.t = t
        self.y = softmax(x)

        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
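Several of the forward methods on this page pair softmax with a cross_entropy_error helper and first convert one-hot labels to class indices. A minimal sketch of such a helper, assuming t holds integer class indices (an illustration, not the verbatim library code):

import numpy as np

def cross_entropy_error(y, t):
    # y: (batch, classes) softmax probabilities; t: (batch,) class indices.
    # A small epsilon guards against taking log(0).
    if y.ndim == 1:
        y = y.reshape(1, -1)
        t = np.asarray(t).reshape(1)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size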
Example #20
    def train(self, inputs, labels, inputs_test, labels_test):
        time_start = datetime.now()
        print('\n Train:  \n\n')
        self.initial_weights_biases()
        random_state = np.random.get_state()
        np.random.shuffle(inputs)
        np.random.set_state(random_state)
        np.random.shuffle(labels)
        for epoch in range(self.num_epochs):

            for iteration in range(0, inputs.shape[0], self.batch_size):

                # batch input
                inputs_batch = inputs[iteration:iteration + self.batch_size]
                labels_batch = labels[iteration:iteration + self.batch_size]

                # forward pass
                inputs_batch = inputs_batch.T
                z1 = self.weight1.dot(inputs_batch) + self.bias1
                a1 = functions.relu(z1)
                z2 = self.weight2.dot(a1) + self.bias2
                y = functions.softmax(z2)

                delta_2 = labels_batch.T - y
                weight2_gradient = delta_2.dot(a1.T) / self.batch_size
                bias2_gradient = np.sum(delta_2, axis=1,
                                        keepdims=True) / self.batch_size

                delta_1 = self.weight2.T.dot(delta_2) * functions.relu_deriv(
                    z1)
                weight1_gradient = delta_1.dot(
                    inputs_batch.T) / self.batch_size
                bias1_gradient = np.sum(delta_1, axis=1,
                                        keepdims=True) / self.batch_size

                self.weight1 = self.weight1 + self.learning_rate * weight1_gradient
                self.weight2 = self.weight2 + self.learning_rate * weight2_gradient
                self.bias1 = self.bias1 + self.learning_rate * bias1_gradient
                self.bias2 = self.bias2 + self.learning_rate * bias2_gradient
            print('Epoch---{}'.format(epoch))
            accuracy, crossentropy = self.predict(inputs, labels, self.weight1,
                                                  self.weight2, self.bias1,
                                                  self.bias2)
            print(" accuracy: ", str(accuracy), " loss: ", str(crossentropy))
        accuracy, crossentropy = self.predict(inputs, labels, self.weight1,
                                              self.weight2, self.bias1,
                                              self.bias2)
        print("Training: \n", " accuracy: ", str(accuracy), " loss: ",
              str(crossentropy))
        delta_time = datetime.now() - time_start
        print('Time:', delta_time)
        test_accuracy, test_entropy = self.predict(inputs_test, labels_test,
                                                   self.weight1, self.weight2,
                                                   self.bias1, self.bias2)
        print("Test: \n", " accuracy: ", str(test_accuracy), " loss: ",
              str(test_entropy))
    def predict(self, x):
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = functions.sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = functions.softmax(a2)

        return y
Example #22
def predict(network, x):
    W1, W2 = network['W1'], network['W2']
    b1, b2 = network['b1'], network['b2']

    a1 = np.dot(x, W1) + b1
    z1 = f.sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    y = f.softmax(a2)

    return y
Example #23
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # if the teacher labels are one-hot vectors, convert them to the indices of the correct classes
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
    def predict(self, x):
        w1, w2 = self.dict['w1'], self.dict['w2']
        b1, b2 = self.dict['b1'], self.dict['b2']

        a1 = np.dot(x, w1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, w2) + b2
        y = softmax(a2)

        return y
Example #25
        def predict(self, x):
            """ given input x, calculate output using current parameters: W1,b1,W2,b2"""
            # input --> hidden layer
            z2 = np.dot(x, self.params['W1']) + self.params['b1']
            a2 = sigmoid(z2)
            # hidden layer --> output
            z3 = np.dot(a2, self.params['W2']) + self.params['b2']
            y = softmax(z3)

            return y
Example #26
    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)

        # if the answer labels are one-hot vectors, convert them to the indices of the correct classes
        if self.t.size == self.y.size:
            self.t = self.t.argmax(axis=1)

        loss = cross_entropy_error(self.y, self.t)
        return loss
    def predict(self, x):
        """ given input x, calculate output using current parameters : W1,b1,W2,b2 """
        # input --> hidden layer
        l1 = np.dot(x, self.params["W1"]) + self.params["b1"]
        l1 = sigmoid(l1)

        # hidden layer --> output layer
        l2 = np.dot(l1, self.params["W2"]) + self.params["b2"]
        self.score = softmax(l2)

        return self.score
 def predict(self, X):
     # hidden_1
     h1_input = np.dot(X, self.W1) + self.b1
     h1_output = functions.relu(h1_input)
     # hidden_2
     h2_input = np.dot(h1_output, self.W2) + self.b2
     h2_output = functions.relu(h2_input)
     # output
     o_input = np.dot(h2_output, self.W3) + self.b3
     y_hat = functions.softmax(o_input)
     return y_hat
Example #29
	def loss(self, x, t):
		"""
		x : array-like
			input
		t : array-like
			true label
		"""
		z = self.predict(x)
		y = softmax(z)
		loss = cross_entropy_error(y, t)
		return loss
Example #30
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)

    return y
Example #31
 def test_softmax(self):
     assert (softmax(np.ones(10)) == np.ones(10)/10).all()
     assert (softmax(np.ones((1, 10))) == np.ones((1, 10))/10).all()
Example #32
 def get_outputs(self):
     return functions.softmax(self.output_layer.get_values())
Example #33
    def soft_max_outputs(self):
        max_values = functions.softmax(self.get_values())

        for i in range(len(self.nodes)):
            self.nodes[i].soft_max = max_values[i]
Example #34
 def test_softmax(self):
   x = np.array([[1, 2, 3]]).T
   expected = np.array([[0.0900, 0.2447, 0.6652]]).T
   y = softmax(x)
   diff_abs = abs(y - expected)
   self.assertFalse((diff_abs > 0.0001).any())