コード例 #1
0
ファイル: main_cnn.py プロジェクト: zczjx/es_net
from common_func import *
from es_net import *
from es_nn_layer import *
from mnist import load_mnist

if __name__=='__main__':
    # Load MNIST keeping the 2-D image layout (1x28x28) for the conv net.
    (train_data, train_label), (test_data, test_label) = load_mnist(flatten=False)
    image_size = 28 * 28
    hidden_nodes = 100
    output_nodes = 10
    input_dim = (1, 28, 28)  # (channels, height, width)
    update_class = Momentum

    # random init the dnn param
    dnn = es_net()

    # Conv layer hyper-parameters
    filter_num = 30
    filter_size = 5
    filter_pad = 0
    filter_stride = 1
    input_size = input_dim[1]
    # Floor division: a spatial size must be an int. The original true
    # division produced a float (e.g. 24.0) that consumers had to cast back.
    conv_output_size = (input_size - filter_size + 2*filter_pad) // filter_stride + 1
    # 2x2 pooling halves each spatial dimension; result is already an int.
    pool_output_size = filter_num * (conv_output_size // 2) * (conv_output_size // 2)

    # Scale for ReLU activations (presumably He-style init -- TODO confirm
    # against weight_init_scale in common_func). Note: the redundant
    # scale=0.01 dead store that preceded this assignment was removed.
    scale = weight_init_scale(input_size=28*28, active_func='relu')
    dnn_weight_arr = scale * \
                    np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
    dnn_bias_arr = np.zeros(filter_num)
    updater_obj = update_class(learning_rate=0.1)
コード例 #2
0
ファイル: main_batch.py プロジェクト: zczjx/es_net
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys, os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from active_func import *
from common_func import *
from es_net import *
from mnist import load_mnist

if __name__ == '__main__':
    # Evaluate a pre-trained 3-layer MLP on the MNIST test split, batch by batch.
    (train_data, train_label), (test_data, test_label) = load_mnist()

    # Pre-trained parameters pickled as {'W1','b1','W2','b2','W3','b3'}.
    with open("sample_weight.pkl", 'rb') as f:
        params = pickle.load(f)

    dnn = es_net(layers=3, active_func=sigmoid, out_func=softmax)
    for idx in range(3):
        dnn.setup_nn_param(idx,
                           weight_arr=params['W' + str(idx + 1)],
                           bias_arr=params['b' + str(idx + 1)])

    batch_size = 100
    correct = 0
    for start in range(0, len(test_data), batch_size):
        stop = start + batch_size
        scores = dnn.forward_inference(test_data[start:stop])
        predictions = np.argmax(scores, axis=1)
        correct += np.sum(predictions == test_label[start:stop])

    print("Final Accuracy: " + str(float(correct) / len(test_data)))
コード例 #3
0
import numpy as np
import matplotlib.pyplot as plt
from active_func import *
from common_func import *
from es_net import *
from mnist import load_mnist

if __name__ == '__main__':
    # Train a 2-layer MLP (784 -> 30 -> 10) on flattened MNIST.
    (train_data, train_label), (test_data, test_label) = load_mnist()
    weight_init_std = 0.01
    image_size = 28 * 28
    hidden_nodes = 30
    output_nodes = 10

    # random init the dnn param
    dnn = es_net(layers=2, active_func=ReLU, out_func=softmax)

    # Layer 0: input -> hidden, small Gaussian weights, zero biases.
    dnn_weight_arr = weight_init_std * np.random.randn(image_size,
                                                       hidden_nodes)
    dnn_bias_arr = np.zeros(hidden_nodes)
    dnn.setup_nn_param(0, weight_arr=dnn_weight_arr, bias_arr=dnn_bias_arr)

    # Layer 1: hidden -> output.
    dnn_weight_arr = weight_init_std * np.random.randn(hidden_nodes,
                                                       output_nodes)
    dnn_bias_arr = np.zeros(output_nodes)
    dnn.setup_nn_param(1, weight_arr=dnn_weight_arr, bias_arr=dnn_bias_arr)

    train_data_num = train_data.shape[0]
    batch_size = 100
    # Floor division: an epoch is a whole number of iterations; the original
    # true division yielded a float (e.g. 600.0).
    iter_per_epoch = max(train_data_num // batch_size, 1)
    training_iters = 1001
コード例 #4
0
    hidden_nodes = 100
    output_nodes = 10
    input_dim = (1, 28, 28)  # (channels, height, width) of one MNIST image

    # conv param
    filter_num = 30
    filter_size = 5
    filter_pad = 0
    filter_stride = 1
    input_size = input_dim[1]
    # Output side length of the conv layer; true division yields a float
    # (e.g. 24.0), hence the int() cast on the next line.
    conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
    # Flattened feature count after 2x2 pooling halves each spatial dim.
    pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
    

    # cnn use SGD
    cnn_default = es_net()
    update_class = SGD

    # Conv layer
    # 'default' presumably selects the library's default init scale --
    # TODO confirm against weight_init_scale in common_func.
    scale = weight_init_scale(input_size=28*28, active_func='default')
    dnn_weight_arr = scale * \
                    np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
    dnn_bias_arr = np.zeros(filter_num)
    updater_obj = update_class(learning_rate=0.1)
    layer_tmp = conv_layer(dnn_weight_arr, dnn_bias_arr, filter_stride, \
                    filter_pad, updater=updater_obj)
    cnn_default.add_layer(layer_obj=layer_tmp)

    # ReLU layer
    layer_tmp = ReLU_layer()
    cnn_default.add_layer(layer_obj=layer_tmp)
コード例 #5
0
    hidden_nodes = 100
    output_nodes = 10
    input_dim = (1, 28, 28)  # (channels, height, width) of one MNIST image

    # conv param
    filter_num = 30
    filter_size = 5
    filter_pad = 0
    filter_stride = 1
    input_size = input_dim[1]
    # Output side length of the conv layer; true division yields a float
    # (e.g. 24.0), hence the int() cast on the next line.
    conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
    # Flattened feature count after 2x2 pooling halves each spatial dim.
    pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
    

    # cnn use SGD
    cnn_sgd = es_net()
    update_class = SGD

    # Conv layer

    # NOTE(review): weight_init_std is not defined in this visible span --
    # presumably assigned earlier in the enclosing script; verify.
    dnn_weight_arr = weight_init_std * \
                    np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
    dnn_bias_arr = np.zeros(filter_num)
    updater_obj = update_class(learning_rate=0.1)
    layer_tmp = conv_layer(dnn_weight_arr, dnn_bias_arr, filter_stride, \
                    filter_pad, updater=updater_obj)
    cnn_sgd.add_layer(layer_obj=layer_tmp)

    # ReLU layer
    layer_tmp = ReLU_layer()
    cnn_sgd.add_layer(layer_obj=layer_tmp)