Example #1
def predict(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    return y
Example #2
def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    a1 = np.dot(x, W1) + b1
    z1 = common.sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = common.sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = common.identity_function(a3)
    return y
Example #3
def forward(network, x):
    W1, W2, W3 = network["W1"], network["W2"], network["W3"]
    b1, b2, b3 = network["b1"], network["b2"], network["b3"]

    a1 = x @ W1 + b1
    z1 = sigmoid(a1)
    a2 = z1 @ W2 + b2
    z2 = sigmoid(a2)
    a3 = z2 @ W3 + b3
    y = identity_function(a3)

    return y
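The three forward/predict variants above all assume activation helpers defined elsewhere (a `common` module in some of the listings). A minimal sketch of what `sigmoid`, `softmax`, and `identity_function` typically look like with NumPy inputs; this is an assumed implementation, not code from the listed projects:

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def softmax(a):
    # subtract the row-wise maximum for numerical stability
    a = a - np.max(a, axis=-1, keepdims=True)
    exp_a = np.exp(a)
    return exp_a / np.sum(exp_a, axis=-1, keepdims=True)

def identity_function(x):
    # pass-through output "activation"
    return x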
Example #4
def forward(network, x):
    """입력 신호를 출력으로 변환하는 처리과정을 구현
    함수 이름이 forward인 것은 신호가 순방향으로 전달됨을 알리기 위해"""
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    a3 = np.dot(z2, W3) + b3
    y = identity_function(a3)

    return y
Example #5
    def predict(self, X):

        m, n = X.shape
        O = len(self.w2)
        # m, I + 1
        X = np.column_stack([X, np.matrix(np.ones([m, 1]))])
        # m, H
        h1 = sigmoid(X * self.w1.T)
        # m, H + 1
        h1 = np.column_stack([h1, np.matrix(np.ones([m, 1]))])
        # m, O
        h2 = sigmoid(h1 * self.w2.T)

        return np.argmax(h2, 1)
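The forward pass above is written with `np.matrix`, whose `*` operator means matrix multiplication; `np.matrix` is deprecated in current NumPy. A minimal equivalent sketch using plain arrays, assuming `self.w1` (H × (I+1)) and `self.w2` (O × (H+1)) are ordinary 2-D arrays and `sigmoid` is the element-wise helper used throughout:

    def predict_ndarray(self, X):
        m = X.shape[0]
        X = np.hstack([X, np.ones((m, 1))])      # m, I + 1 (append bias column)
        h1 = sigmoid(X @ self.w1.T)              # m, H
        h1 = np.hstack([h1, np.ones((m, 1))])    # m, H + 1
        h2 = sigmoid(h1 @ self.w2.T)             # m, O
        return np.argmax(h2, axis=1)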
Example #6
File: nn.py Project: 4everer/ml
    def predict(self, X):

        m, n = X.shape
        O = len(self.w2)
        # m, I + 1
        X = np.column_stack([X, np.matrix(np.ones([m, 1]))])
        # m, H
        h1 = sigmoid(X * self.w1.T)
        # m, H + 1
        h1 = np.column_stack([h1, np.matrix(np.ones([m, 1]))])
        # m, O
        h2 = sigmoid(h1 * self.w2.T)
        
        return np.argmax(h2, 1) 
Example #7
def forward_propagation(network, x):
    w1, w2, w3 = network['w1'], network['w2'], network['w3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']

    a1 = np.dot(x, w1) + b1
    z1 = sigmoid(a1)

    a2 = np.dot(z1, w2) + b2
    z2 = sigmoid(a2)

    a3 = np.dot(z2, w3) + b3
    y = identity(a3)

    return y
Example #8
    def cost(self, w, X, Y, lamb, I, H, O):

        m, n = X.shape

        w1 = w[0:H * (I + 1)].reshape(H, I + 1)
        w2 = w[H * (I + 1):].reshape(O, H + 1)
        # m, n + 1
        a1 = np.column_stack([X, np.matrix(np.ones([m, 1]))])
        # m, H
        z2 = a1 * w1.T
        s2 = sigmoid(z2)
        # m, H + 1
        a2 = np.column_stack([s2, np.matrix(np.ones([m, 1]))])
        # m, O
        z3 = a2 * w2.T
        # m, O
        a3 = sigmoid(z3)

        # build one-hot targets (note: the name I is reused here, shadowing the
        # input-size argument, which is no longer needed at this point)
        I = Y.T
        Y = np.matrix(np.zeros([m, O]))
        Y[(np.matrix(range(m)), I)] = 1

        L = (1.0 / m) * (-np.multiply(Y, np.log(a3)) -
                         np.multiply(1.0 - Y, np.log(1.0 - a3))).sum()

        R = (lamb / (2.0 * m)) * (np.square(w1[:, 0:-1]).sum() +
                                  np.square(w2[:, 0:-1]).sum())

        J = L + R
        # m, O
        delta3 = a3 - Y
        # m, H
        delta2 = np.multiply(delta3 * w2[:, 0:-1], np.multiply(s2, 1.0 - s2))
        # H, n + 1
        l1_grad = delta2.T * a1
        # O, H + 1
        l2_grad = delta3.T * a2

        r1_grad = np.column_stack([w1[:, 0:-1], np.matrix(np.zeros([H, 1]))])
        r2_grad = np.column_stack([w2[:, 0:-1], np.matrix(np.zeros([O, 1]))])

        w1_grad = (1.0 / m) * l1_grad + (1.0 * lamb / m) * r1_grad
        w2_grad = (1.0 / m) * l2_grad + (1.0 * lamb / m) * r2_grad

        grad = np.row_stack([w1_grad.reshape(-1, 1), w2_grad.reshape(-1, 1)])

        self.c += 1

        return J, grad
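A quick way to sanity-check the analytic gradient returned by a cost function of this shape is a central-difference comparison. A minimal sketch, assuming `cost_fn(w)` is a hypothetical wrapper (for example a `functools.partial` over the method above) that takes a flat weight vector and returns `(J, grad)`:

import numpy as np

def gradient_check(cost_fn, w, eps=1e-4, n_checks=10):
    # compare a few analytic gradient entries against central differences
    _, grad = cost_fn(w)
    grad = np.asarray(grad).ravel()
    rng = np.random.default_rng(0)
    for idx in rng.choice(w.size, size=min(n_checks, w.size), replace=False):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[idx] += eps
        w_minus[idx] -= eps
        numeric = (cost_fn(w_plus)[0] - cost_fn(w_minus)[0]) / (2 * eps)
        print(f"param {idx}: analytic {grad[idx]:+.6f}, numeric {numeric:+.6f}")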
Example #9
def cost(theta, X, y, lam):
    h = sigmoid(np.dot(X, theta))
    t = np.zeros(len(theta))
    t[1:] = theta[1:]
    J = (-(np.dot(y, np.log(h)) + np.dot(1 - y, np.log(1 - h))) / X.shape[0] +
         lam * np.dot(t, t) / (2 * X.shape[0]))
    return J
Example #10
def train2l(w1, w2):
    for rev_i, review in enumerate(x_train * iterations):
        for target_i in range(len(review)):
            target_samples = [review[target_i]] + list(
                x_concat[(np.random.rand(negative) *
                          len(x_concat)).astype('int').tolist()])
            left_context = review[max(0, target_i - window):target_i]
            right_context = review[target_i + 1:min(len(review), target_i +
                                                    window)]

            try:
                l1 = np.mean(w1[left_context + right_context], axis=0)
                l2 = common.sigmoid(l1.dot(w2[target_samples].T))

                d2 = l2 - l2_target
                d1 = d2.dot(w2[target_samples])

                w1[left_context + right_context] -= d1 * alpha
                w2[target_samples] -= np.outer(d2, l1) * alpha
            except Exception as e:
                print("in nlpblankNN.py : ", l1.dot(w2[target_samples].T),
                      rev_i, target_i, e, target_samples)
                return (w1, w2)
            # finally :
            #     print (w2[target_samples].T, rev_i, target_i, target_samples)

            # if (rev_i == 0 and target_i == 3):
            #     print(l1.shape, l2.shape, w2[target_samples].shape, d1.shape, d2.shape)
            #     return
    return (w1, w2)
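`l2_target` is not defined inside this snippet; it is presumably a module-level target vector with a 1 for the true centre word and a 0 for each of the `negative` sampled words. A sketch of the assumed definition:

l2_target = np.array([1] + [0] * negative)  # assumed: positive sample first, then the negatives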
Example #11
def train_model():
    # Step 0 - pull data provider 
    mnist = get_data_provider()

    # Step 1 - build the model
    W_1 = draw_params((HIDDEN_1_SIZE, IMG_SIZE))
    b_1 = draw_params((HIDDEN_1_SIZE, 1))

    W_2 = draw_params((HIDDEN_2_SIZE, HIDDEN_1_SIZE))
    b_2 = draw_params((HIDDEN_2_SIZE, 1))

    W_3 = draw_params((OUTPUT_CLASSES_NO, HIDDEN_2_SIZE))
    b_3 = draw_params((OUTPUT_CLASSES_NO, 1))

    # Step 2 - train the model
    accuracy_acc = []
    for iter_no in tqdm.tqdm(xrange(NUMBER_OF_TRAINING_ITERATIONS)):
        x, y = mnist.train.next_batch(BATCH_SIZE)

        # forward pass
        a_1 = np.dot(x, W_1.T) + b_1.T
        h_1 = elu(a_1)
        a_2 = np.dot(h_1, W_2.T) + b_2.T
        h_2 = elu(a_2)
        y_hat = sigmoid(np.dot(h_2, W_3.T) + b_3.T)

        err = y_hat - y

        # some accounting
        pred_class = np.argmax(y_hat, axis=1)
        accuracy = accuracy_score(unbinarize(y), pred_class)
        accuracy_acc.append(accuracy)

        # backward pass
        dW_3 = np.dot(err.T, h_2) / h_2.shape[0]
        db_3 = err.mean()

        dh_2 = np.dot(err, W_3)
        da_2 = dh_2 * d_elu(a_2)

        dW_2 = np.dot(da_2.T, h_1) / h_1.shape[0]
        db_2 = da_2.mean()

        dh_1 = np.dot(da_2, W_2)
        da_1 = dh_1 * d_elu(a_1)

        dW_1 = np.dot(da_1.T, x) / x.shape[0]
        db_1 = da_1.mean()

        # apply parameter learning
        W_1 -= LR * dW_1
        b_1 -= LR * db_1
        
        W_2 -= LR * dW_2
        b_2 -= LR * db_2

        W_3 -= LR * dW_3
        b_3 -= LR * db_3

    return accuracy_acc
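The helpers used by `train_model` (`draw_params`, `elu`, `d_elu`, `unbinarize`, plus the `get_data_provider` loader) are not part of this listing. A minimal sketch of plausible implementations for the numeric helpers; these are assumptions, not the original code:

import numpy as np

def draw_params(shape, scale=0.01):
    # small random initialisation
    return np.random.randn(*shape) * scale

def elu(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1))

def d_elu(x, alpha=1.0):
    # derivative of elu with respect to its input
    return np.where(x > 0, 1.0, alpha * np.exp(x))

def unbinarize(y):
    # one-hot rows back to class indices
    return np.argmax(y, axis=1)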
Example #12
def predict(list_image_path, param_values):
    # Load the model which detects number plates over a sliding window.
    x, y, params = model.get_detect_model()

    # Execute the model at each scale.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        for image_name in list_image_path:
            im_gray = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE) / 255.
            im_gray = cv2.resize(im_gray, (128, 64))

            print("-------------")
            feed_dict = {x: numpy.stack([im_gray])}
            feed_dict.update(dict(zip(params, param_values)))
            y_val = sess.run(y, feed_dict=feed_dict)

            letter_probs = y_val[0, 0, 0, 1:].reshape(10, len(common.CHARS))
            letter_probs = common.softmax(letter_probs)

            present_prob = common.sigmoid(y_val[0, 0, 0, 0])
            print("input", image_name)
            print("output", letter_probs_to_code(letter_probs))
Example #13
def detect(im, param_vals):
    scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))

    x, y, params = model.get_detect_model()

    with tf.Session() as sess:
        y_vals = []
        for scaled_im in scaled_ims:
            feed_dict = {x: numpy.stack([scaled_im])}
            feed_dict.update(dict(zip(params, param_vals)))
            y_vals.append(sess.run(y, feed_dict=feed_dict))

    for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
        for window_coords in numpy.argwhere(
                y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):
            letter_probs = y_val[0, window_coords[0], window_coords[1],
                                 1:].reshape(7, len(common.CHARS))
            letter_probs = common.softmax(letter_probs)

            img_scale = float(im.shape[0]) / scaled_im.shape[0]

            bbox_tl = window_coords * (8, 4) * img_scale
            bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

            present_prob = common.sigmoid(
                               y_val[0, window_coords[0], window_coords[1], 0])

            yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
Example #14
def detect(im, param_vals):
    """
    Detect all bounding boxes of number plates in an image.
    :param im:
        Image to detect number plates in.
    :param param_vals:
        Model parameters to use. These are the parameters output by the `train`
        module.
    :returns:
        Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
        top-left and bottom-right corners respectively, and a 7,36 matrix
        giving the probability distributions of each letter.
    """

    # Convert the image to various scales.
    scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))

    # Load the model which detects number plates over a sliding window.
    x, y, params = model.get_detect_model()

    # Execute the model at each scale.
    with tf.Session(config=tf.ConfigProto()) as sess:
        y_vals = []

        for scaled_im in scaled_ims:
            feed_dict = {x: numpy.stack([scaled_im])}
            feed_dict.update(dict(zip(params, param_vals)))
            y_vals.append(sess.run(y, feed_dict=feed_dict))
            plt.imshow(scaled_im)
            plt.show()
    writer = tf.summary.FileWriter("logs/", sess.graph)

    # Interpret the results in terms of bounding boxes in the input image.
    # Do this by identifying windows (at all scales) where the model predicts a
    # number plate has a greater than 50% probability of appearing.
    #
    # To obtain pixel coordinates, the window coordinates are scaled according
    # to the stride size, and pixel coordinates.
    count_detect = 0
    for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
        for window_coords in numpy.argwhere(
                y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):
            letter_probs = (y_val[0, window_coords[0], window_coords[1],
                                  1:].reshape(7, len(common.CHARS)))
            letter_probs = common.softmax(letter_probs)

            img_scale = float(im.shape[0]) / scaled_im.shape[0]

            bbox_tl = window_coords * (8, 4) * img_scale
            bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

            present_prob = common.sigmoid(y_val[0, window_coords[0],
                                                window_coords[1], 0])
            count_detect += 1
            yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
            print("count detect:", count_detect)
            print("show return window: ", bbox_tl, "return windows box: ",
                  bbox_tl + bbox_size)
            print("present: ", present_prob)
            print("letter: ", letter_probs_to_code(letter_probs))
Example #15
File: nn.py Project: 4everer/ml
    def cost(self, w, X, Y, lamb, I, H, O):

        m, n = X.shape

        w1 = w[0:H * (I + 1)].reshape(H, I + 1)
        w2 = w[H * (I + 1):].reshape(O, H + 1)
        # m, n + 1
        a1 = np.column_stack([X, np.matrix(np.ones([m, 1]))])
        # m, H
        z2 = a1 * w1.T
        s2 = sigmoid(z2)
        # m, H + 1
        a2 = np.column_stack([s2, np.matrix(np.ones([m, 1]))])
        # m, O
        z3 = a2 * w2.T
        # m, O
        a3 = sigmoid(z3)

        I = Y.T
        Y = np.matrix(np.zeros([m, O]))
        Y[(np.matrix(range(m)), I)] = 1
        
        L = (1.0 / m) * (-np.multiply(Y, np.log(a3)) -
                         np.multiply(1.0 - Y, np.log(1.0 - a3))).sum()

        R = (lamb / (2.0 * m)) * (np.square(w1[:, 0:-1]).sum() +
                                  np.square(w2[:, 0:-1]).sum())

        J = L + R
        # m, O
        delta3 = a3 - Y
        # m, H 
        delta2 = np.multiply(delta3 * w2[:, 0 : -1], np.multiply(s2, 1.0 - s2))
        # H, n + 1
        l1_grad = delta2.T * a1
        # O, H + 1
        l2_grad = delta3.T * a2
        
        r1_grad = np.column_stack([w1[:, 0 : -1], np.matrix(np.zeros([H, 1]))])
        r2_grad = np.column_stack([w2[:, 0 : -1], np.matrix(np.zeros([O, 1]))])

        w1_grad = (1.0 / m) * l1_grad + (1.0 * lamb / m) * r1_grad
        w2_grad = (1.0 / m) * l2_grad + (1.0 * lamb / m) * r2_grad
        
        grad = np.row_stack([w1_grad.reshape(-1, 1), w2_grad.reshape(-1, 1)])
    
        self.c += 1

        return J, grad
Example #16
def costFunctionReg(theta, X, y, lam):
    h = sigmoid(np.dot(X, theta))
    t = np.zeros(len(theta))
    t[1:] = theta[1:]
    J = (-(np.dot(y, np.log(h)) + np.dot(1 - y, np.log(1 - h))) / X.shape[0] +
         lam * np.dot(t, t) / (2 * X.shape[0]))
    grad = np.dot(X.T, h - y) / X.shape[0] + lam * t / X.shape[0]
    return J, grad
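Because `costFunctionReg` returns both the cost and its gradient, it can be handed to `scipy.optimize.minimize` with `jac=True`. A minimal usage sketch, assuming `X` already contains a bias column and `y` is a 0/1 label vector:

import numpy as np
from scipy.optimize import minimize

theta0 = np.zeros(X.shape[1])
res = minimize(costFunctionReg, theta0, args=(X, y, 1.0),
               jac=True, method='L-BFGS-B')
theta_opt = res.x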
Example #17
def main():
    data = pd.read_csv('csv/train.csv')

    # shuffle and split (0.7/0.3)
    data = data.sample(frac=1)
    m = len(data)
    m_train = round(m * 0.7)
    train = data.iloc[:m_train,]
    valid = data.iloc[m_train:,]

    goals_train = pd.DataFrame({'survived': train['Survived']})
    goals_valid = pd.DataFrame({'survived': valid['Survived']})
    train = prepare(train)
    valid = prepare(valid)

    goals_train = goals_train.to_numpy()
    goals_valid = goals_valid.to_numpy()
    inputs_train = train.to_numpy()
    inputs_valid = valid.to_numpy()

    theta = np.zeros((inputs_train.shape[1], 1))
    alpha = 0.01
    iterations = 100_000

    y = goals_train
    for i in range(iterations):
        m = len(inputs_train)
        x = inputs_train
        h = x.dot(theta)
        p = sigmoid(h)
        theta -= (alpha/m) * x.transpose().dot((p - y))
        c = cost(x, y, theta)
        if i % (iterations/10) == 0:
            print(f'cost={c:.3f}')


    predictions = sigmoid(inputs_valid.dot(theta))
    predictions[:,0] = predictions[:,0].round()
    correct = len(inputs_valid[(predictions == goals_valid)[:,0]])
    
    accuracy = correct / len(goals_valid) * 100
    print(f'accuracy: {accuracy:.3f}%')

    weights_file = 'weights.csv'
    np.savetxt(weights_file, theta, delimiter=',')
    print(f'saved weights {theta} as CSV to {weights_file}')
Example #18
    def sim(self, w1, w2):
        if w1 == w2:
            return 1
        if w1 not in self.word_to_idx or w2 not in self.word_to_idx:
            return 0
        dot = np.dot(self.vectors[self.word_to_idx[w1]],
                     self.vectors[self.word_to_idx[w2]])
        return sigmoid(self.w * dot + self.b)
Example #19
def chap3_2_4():
    """3.2.4 시그모이드 함수 구현하기
    시그모이드 함수를 그래프로 그려봅니다.
    """
    x = np.arange(-5.0, 5.0, 0.1)
    y = sigmoid(x)
    plt.plot(x, y)
    plt.ylim(-0.1, 1.1)  # specify the y-axis range
    plt.show()
Example #20
def detect(im, param_vals):
    """
    Detect number plates in an image.

    :param im:
        Image to detect number plates in.

    :param param_vals:
        Model parameters to use. These are the parameters output by the `train`
        module.

    :returns:
        Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
        top-left and bottom-right corners respectively, and a 7,36 matrix
        giving the probability distributions of each letter.

    """

    # Convert the image to various scales.
    scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))

    # Load the model which detects number plates over a sliding window.
    # (In this variant the detection graph `x, y, params` and the session
    # `sess1` are assumed to be created at module scope; the original
    # per-call setup is left commented out below.)

    # Execute the model at each scale.
    #    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.50)
    #    #sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    #    #with tf.Session(config=tf.ConfigProto()) as sess:
    #    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess1:
    y_vals = []
    for scaled_im in scaled_ims:
        feed_dict = {x: numpy.stack([scaled_im])}
        feed_dict.update(dict(zip(params, param_vals)))
        y_vals.append(sess1.run(y, feed_dict=feed_dict))

    # Interpret the results in terms of bounding boxes in the input image.
    # Do this by identifying windows (at all scales) where the model predicts a
    # number plate has a greater than 50% probability of appearing.
    #
    # To obtain pixel coordinates, the window coordinates are scaled according
    # to the stride size, and pixel coordinates.
    for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
        for window_coords in numpy.argwhere(
                y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):
            #for window_coords in numpy.argwhere(y_val[0, :, :, 0] > -math.log(1./0.80 - 1)):
            letter_probs = (y_val[0, window_coords[0], window_coords[1],
                                  1:].reshape(9, len(common.CHARS)))
            letter_probs = common.softmax(letter_probs)

            img_scale = float(im.shape[0]) / scaled_im.shape[0]

            bbox_tl = window_coords * (8, 4) * img_scale
            bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

            present_prob = common.sigmoid(y_val[0, window_coords[0],
                                                window_coords[1], 0])

            yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
Example #21
def detect(im, param_vals):
    """
    Detect number plates in an image.

    :param im:
        Image to detect number plates in.

    :param param_vals:
        Model parameters to use. These are the parameters output by the `train`
        module.

    :returns:
        Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
        top-left and bottom-right corners respectively, and a 7,36 matrix
        giving the probability distributions of each letter.

    """

    # Convert the image to various scales.
    scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))

    # Load the model which detects number plates over a sliding window.
    x, y, params = model.get_detect_model()

    # Execute the model at each scale.
    with tf.Session(config=tf.ConfigProto()) as sess:
        y_vals = []
        for scaled_im in scaled_ims:
            feed_dict = {x: numpy.stack([scaled_im])}
            feed_dict.update(dict(zip(params, param_vals)))
            y_vals.append(sess.run(y, feed_dict=feed_dict))

    # Interpret the results in terms of bounding boxes in the input image.
    # Do this by identifying windows (at all scales) where the model predicts a
    # number plate has a greater than 50% probability of appearing.
    #
    # To obtain pixel coordinates, the window coordinates are scaled according
    # to the stride size, and pixel coordinates.
    for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
        for window_coords in numpy.argwhere(
                y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):
            letter_probs = y_val[0, window_coords[0], window_coords[1],
                                 1:].reshape(7, len(common.CHARS))
            letter_probs = common.softmax(letter_probs)

            img_scale = float(im.shape[0]) / scaled_im.shape[0]

            bbox_tl = window_coords * (8, 4) * img_scale
            bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

            present_prob = common.sigmoid(
                               y_val[0, window_coords[0], window_coords[1], 0])

            yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
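`detect` is a generator, so callers typically iterate over it and keep, for instance, the most confident window. A minimal consumer sketch; it assumes `letter_probs_to_code` from the surrounding module, as used in the other examples:

best = None
for bbox_tl, bbox_br, present_prob, letter_probs in detect(im, param_vals):
    if best is None or present_prob > best[2]:
        best = (bbox_tl, bbox_br, present_prob, letter_probs)

if best is not None:
    tl, br, prob, probs = best
    print("plate:", letter_probs_to_code(probs), "p =", prob, "bbox:", tl, br)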
Example #22
    def cost(self, w, X, Y, lamb):   
        m = len(X)
        S = sigmoid(X * w)
    
        L = (1.0 / (2 * m)) * (- Y.T * np.log(S) - (1.0 - Y).T * np.log(1.0 - S))
        fx = float(L + (lamb / 2.0) * (w.T * w))

        df = (1.0 / m) * X.T * (S - Y) + 1.0 * lamb * w

        self.c += 1

        return fx, df
Example #23
def forward_progation(x):
    w1 = params['w1']
    b1 = params['b1']

    a1 = np.dot(x, w1) + b1
    z1 = sigmoid(a1)

    w2 = params['w2']
    b2 = params['b2']
    a2 = np.dot(z1, w2) + b2

    y = softmax(a2)
    return y
Example #24
def train2l(input, w1, w2):
    for n in range(iterations):
        correct = 0
        for i in range(trainsize):
            y = y_train[i][0]
            l0 = input[i]
            l1 = common.sigmoid(np.sum(w1[l0], axis=0))
            l2 = common.sigmoid(l1.dot(w2))

            d2 = l2 - y
            d1 = d2.dot(w2.T)*common.sigmoid_deriv(l1)

            w1[l0] -= d1*alpha
            w2 -= np.outer(l1, d2)*alpha

            if (np.abs(d2) < 0.5):
                correct += 1
            
            # if (i == 0):
            #     print (l1.shape, l2.shape, d2.shape, d1.shape, w1[l0].shape)
            
        
        print ("iter = " + str(n), "Train accuracy = " + str(correct/trainsize))

    correct = 0
    #write down weights learned here into a file so that it can be re-used later
    for i in range(testsize):
        y = y_test[i][0]
        l0 = x_test[i]
        l1 = common.sigmoid(np.sum(w1[l0], axis = 0))
        l2 = common.sigmoid(l1.dot(w2))
        d2 = l2 -y

        if (np.abs(d2) < 0.5):
            correct += 1
        
    print ("Test accuracy = " + str(correct/testsize))

    return (w1, w2)
Example #25
    def cost(self, w, X, Y, lamb):
        m = len(X)
        S = sigmoid(X * w)

        L = (1.0 / (2 * m)) * (-Y.T * np.log(S) -
                               (1.0 - Y).T * np.log(1.0 - S))
        fx = float(L + (lamb / 2.0) * (w.T * w))

        df = (1.0 / m) * X.T * (S - Y) + 1.0 * lamb * w

        self.c += 1

        return fx, df
Example #26
def forward_progation(x):
    w1 = params['w1']
    b1 = params['b1']           # (samples * inputs), (inputs * outputs), (outputs)
    a1 = np.dot(x, w1) + b1     # x: 100  *   784,  w1: 784   * 50,    b1: 50

    z1 = sigmoid(a1)            # sigmoid should not be used here when backpropagating
    #z1 = relu(a1)

    w2 = params['w2']
    b2 = params['b2']           # (samples * inputs), (inputs * outputs), (outputs)
    a2 = np.dot(z1, w2) + b2    # z1: 100  *   50,  w2: 50   * 10,    b2: 10
                                # (samples * outputs)
    y = softmax(a2)             # y: 100    * 10

    return y
Example #27
    def detect(self, im):
        """
        Detect number plates in an image.

        :param im:
            Image to detect number plates in.

        :returns:
            Iterable of `bbox_tl, bbox_br, letter_probs`, defining the bounding box
            top-left and bottom-right corners respectively, and a 7,36 matrix
            giving the probability distributions of each letter.

        """

        # Convert the image to various scales.
        MIN_SHAPE = (300, 500)
        scaled_ims = list(make_scaled_ims(im, MIN_SHAPE))

        # Execute the model at each scale.
        y_vals = []
        for scaled_im in scaled_ims:
            feed_dict = {self.x: numpy.stack([scaled_im])}
            feed_dict.update(dict(zip(self.params, self.param_vals)))
            y_vals.append(self.sess.run(self.y, feed_dict=feed_dict))

        # Interpret the results in terms of bounding boxes in the input image.
        # Do this by identifying windows (at all scales) where the model predicts a
        # number plate has a greater than 50% probability of appearing.
        #
        # To obtain pixel coordinates, the window coordinates are scaled according
        # to the stride size, and pixel coordinates.
        for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
            for window_coords in numpy.argwhere(
                    y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):
                letter_probs = (y_val[0, window_coords[0], window_coords[1],
                                      1:].reshape(7, len(common.CHARS)))
                letter_probs = common.softmax(letter_probs)

                img_scale = float(im.shape[0]) / scaled_im.shape[0]

                bbox_tl = window_coords * (8, 4) * img_scale
                bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

                present_prob = common.sigmoid(y_val[0, window_coords[0],
                                                    window_coords[1], 0])

                yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
Example #28
def calculate_accuracy_loss(model, Gs, ts):
    correct_count = 0
    all_count = len(Gs)
    loss = []

    for G, t in zip(Gs, ts):
        x = np.zeros((G.shape[0], model.D[0]))
        x[:, 0] = 1
        y = model.forward(G, x)
        hat_y = sigmoid(y)
        predict = 1 if hat_y[0] > 0.5 else 0
        if predict == t:
            correct_count += 1
        loss.append(model.loss(G, x, t))

    loss = np.mean(np.array(loss))
    return correct_count / all_count, loss
Example #29
def one_relu_run():
    W_1 = draw_params((4, 2))
    b_1 = draw_params((4, 1))
    W_2 = draw_params((4, 4))
    b_2 = draw_params((4, 1))

    i = 0

    accuracy_acc = []

    for i in tqdm.tqdm(range(7000)):
        x, y = draw_sample()

        # forward pass
        a = np.dot(x, W_1.T) + b_1.T
        y_1 = relu(a)
        y_hat = sigmoid(np.dot(y_1, W_2.T) + b_2.T)

        err = y_hat - y

        pred_class = np.argmax(y_hat, axis=1)
        accuracy = accuracy_score(unbinarize(y), pred_class)
        accuracy_acc.append(accuracy)

        # backward pass
        dW_2 = np.dot(err.T, y_1) / y_1.shape[0]
        db_2 = err.mean()

        dy_1 = np.dot(err, W_2)
        da = dy_1 * (a > 0).astype(float)

        dW_1 = np.dot(da.T, x) / x.shape[0]
        db_1 = da.mean()

        W_1 -= LR * dW_1
        b_1 -= LR * db_1

        W_2 -= LR * dW_2
        b_2 -= LR * db_2

    # show_data_sample(x, y)
    return accuracy_acc
Example #30
def detect(im, param_vals):

    # Convert the image to various scales.
    scaled_ims = list(make_scaled_ims(im, model.WINDOW_SHAPE))

    # Load the model which detects number plates over a sliding window.
    x, y, params = model.get_detect_model()

    # Execute the model at each scale.
    with tf.Session(config=tf.ConfigProto()) as sess:
        y_vals = []
        for scaled_im in scaled_ims:
            feed_dict = {x: numpy.stack([scaled_im])}
            feed_dict.update(dict(zip(params, param_vals)))
            y_vals.append(sess.run(y, feed_dict=feed_dict))
    # Interpret the results in terms of bounding boxes in the input image.
    # Do this by identifying windows (at all scales) where the model predicts a
    # number plate has a greater than 50% probability of appearing.
    #
    # To obtain pixel coordinates, the window coordinates are scaled according
    # to the stride size, and pixel coordinates.
    for i, (scaled_im, y_val) in enumerate(zip(scaled_ims, y_vals)):
        #print(i)
        #print(numpy.argwhere(y_val[0, :, :, 0] > -math.log(1./0.99 - 1)))
        #print(-math.log(1./0.99 - 1))
        #print(numpy.argwhere(y_val[0, :, :, 0] >-math.log(1./0.99 - 1)))
        for window_coords in numpy.argwhere(
                y_val[0, :, :, 0] > -math.log(1. / 0.99 - 1)):

            letter_probs = (y_val[0, window_coords[0], window_coords[1],
                                  1:].reshape(7, len(common.CHARS)))
            letter_probs = common.softmax(letter_probs)

            img_scale = float(im.shape[0]) / scaled_im.shape[0]

            bbox_tl = window_coords * (8, 4) * img_scale
            bbox_size = numpy.array(model.WINDOW_SHAPE) * img_scale

            present_prob = common.sigmoid(y_val[0, window_coords[0],
                                                window_coords[1], 0])
            yield bbox_tl, bbox_tl + bbox_size, present_prob, letter_probs
Example #31
def main():
    test = pd.read_csv('csv/test.csv')
    test = prepare(test, with_id=True).to_numpy()

    submission = pd.DataFrame({
        'PassengerId': np.array([], dtype=np.int),
        'Survived': np.array([], dtype=np.int),
    })
    for i in range(len(test)):
        id = int(test[i][0])
        inpt = test[i][1:]  # ignore id column
        pred = sigmoid(inpt.dot(weights))
        submission = submission.append({
            'PassengerId': id,
            'Survived': pred
        },
                                       ignore_index=True)

    submission['PassengerId'] = submission['PassengerId'].astype(int)
    submission['Survived'] = submission['Survived'].round().fillna(0).astype(
        int)
    submission.to_csv('submission.csv', index=False)
Example #32
# sigmoid function & graph
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
try:
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from common import sigmoid
except ImportError:
    print('Library Module Not Found')

x = np.arange(-10, 10, 0.1)
y = sigmoid(x)

plt.plot(x, y)
plt.show()
Example #33
# 3-layer neural network signal propagation, part 4: applying the activation function h() to hidden layer 2
import os
import sys
from pathlib import Path
try:
    sys.path.append(os.path.join(os.getcwd()))
    sys.path.append(os.path.join(Path(os.getcwd()).parent, 'lib'))
    from ex03 import a2
    from common import sigmoid
except ImportError:
    print('Library Module Not Found')

print('\n= Signal propagation, part 4: applying the activation h() to hidden layer 2 ======================')
print(f'a2 dimension: {a2.shape}')  # 2-vector

z2 = sigmoid(a2)
print(f'z2 = {z2}')
Example #34
# 2. Load the training and test data
(train_x, train_t), (test_x, test_t) = load_mnist(normalize=True,
                                                  flatten=True,
                                                  one_hot_label=False)

# 3. Compute accuracy
xlen = len(test_x)
hit = 0

batch_size = 100

for idx, batch_sidx in enumerate(range(0, xlen, batch_size)):
    batch_x = test_x[batch_sidx:batch_size + batch_sidx]

    a1 = np.dot(batch_x, w1) + b1
    z1 = sigmoid(a1)

    a2 = np.dot(z1, w2) + b2
    z2 = sigmoid(a2)

    a3 = np.dot(z2, w3) + b3
    batch_y = softmax(a3)
    #print(batch_y.shape)

    batch_predict = np.argmax(batch_y, axis=1)
    #print(batch_predict)

    batch_t = test_t[batch_sidx:batch_size + batch_sidx]
    #print(batch_t)

    batch_hit = np.sum(batch_predict == batch_t)