def test_max_delta2():
    """Manual check of MaxPoolingLayer's backward pass (calc_input_delta).

    Feeds a 5x5 map with one spiked cell, runs a forward pass, injects a
    known delta, and prints the routed input gradient for eyeballing.
    """
    pool = MaxPoolingLayer("maxpool1", 2, 2)
    source = InputLayer("input", 0)
    sink = InputLayer("output", 0)

    # 5x5 input with a spike at (0, 1, 2) so one max location is obvious.
    data = np.array(range(1, 26)).reshape((-1, 5, 5))
    data[0, 1, 2] = 1000
    source.feed(data)

    # Buffers for the backward pass: gradient to fill, and a fake
    # downstream delta shaped like the pooled output (3x3).
    input_delta = np.zeros(data.shape)
    downstream_delta = np.array(range(1, 10)).reshape((-1, 3, 3))

    # Wire up, forward, then backward.
    pool.set_input_layer(source)
    pool.set_next_layer(sink)
    pool.init()
    pool.active()
    pool.delta = downstream_delta
    pool.calc_input_delta(input_delta)

    # Dump everything for visual inspection.
    print(pool)
    print("\ninput data:")
    print(data)
    print("\nmax pooling result:")
    print(pool.get_output())
    print("\norigin pooling result:")
    print(pool.output)
    print("\nindex-keeper:")
    print(pool.input_keeper)
    print("\ninput_delta")
    print(input_delta)
    return
def test_max_active2():
    """Manual check of MaxPoolingLayer's forward pass on a 6x6 input.

    Spikes one cell so the expected max location is easy to spot, then
    prints the pooled output and the stored argmax index keeper.
    """
    pool = MaxPoolingLayer("maxpool1", 2, 2)
    source = InputLayer("input", 0)
    sink = InputLayer("output", 0)

    # 6x6 input with a spike at (0, 1, 2).
    data = np.array(range(1, 37)).reshape((-1, 6, 6))
    data[0, 1, 2] = 1000
    source.feed(data)

    # Wire up and run one forward pass.
    pool.set_input_layer(source)
    pool.set_next_layer(sink)
    pool.init()
    pool.active()

    # Dump results for visual inspection.
    print(pool)
    print("\ninput data:")
    print(data)
    print("\nmax pooling result:")
    print(pool.get_output())
    print("\norigin pooling result:")
    print(pool.output)
    print("\nindex-keeper:")
    print(pool.input_keeper)
    return
def test1():
    """Smoke-test training of a lone EmbeddingLayer.

    Builds input -> embedding -> fake-output, trains for 500 epochs on
    random target vectors, and prints the weights before and after so
    convergence can be inspected by eye.
    """
    m, n = 3, 5
    targets = get_random_vectors(m, n)

    # Assemble the minimal network around the embedding layer.
    net = NNetwork()
    layer_in = InputLayer("input", 1)
    layer_emb = EmbeddingLayer("emb1", m, n)
    layer_out = FakeOutputLayer("output", n)
    net.set_input(layer_in)
    net.set_output(layer_out)
    net.add_hidden_layer(layer_emb)
    net.connect_layers()
    net.set_log_interval(1000)

    # Weights before training.
    print(layer_emb.weights)
    print("*" * 40)

    samples = get_train_data(m)
    for epoch in range(500):
        train_it(net, targets, samples)
        # Periodic evaluation every 10 epochs.
        if epoch % 10 == 0:
            evaluate_it(net, targets, samples, "epoch-%s" % epoch)

    # Targets and weights after training.
    print(targets)
    print("*" * 40)
    print(layer_emb.weights)
    return
def construct_nn(l2=0.0):
    """Build the language-model network: embedding -> RNN -> softmax.

    Args:
        l2: L2 regularization coefficient applied to the embedding and
            RNN layers (0.0 disables it).

    Returns:
        A fully connected NNetwork ready for training.
    """
    nn = NNetwork()

    # Input / output endpoints.
    nn.set_input(InputLayer("word sequence input", -1))
    nn.set_output(SoftmaxOutputLayer("word predict", VOC_SIZE))

    # Embedding layer: vocabulary ids -> dense word vectors.
    emb = EmbeddingLayer("embedding", VOC_SIZE, WORD_DIM)
    emb.set_lambda2(l2)
    nn.add_hidden_layer(emb)

    # Recurrent layer with truncated BPTT.
    # NOTE: a second stacked RNNLayer ("rnn2", RNN_HIDDEN_DIM2) can be
    # added here the same way if a deeper model is wanted.
    rnn = RNNLayer("rnn1", RNN_HIDDEN_DIM, MAX_BPTT_STEPS)
    rnn.set_lambda2(l2)
    nn.add_hidden_layer(rnn)

    # Finish wiring and log the resulting topology.
    nn.connect_layers()
    logging.info("NN information:\n" + nn.get_detail())
    return nn
def test_normal_active():
    """Time 1000 forward passes of a dense HiddenLayer (784 units).

    Prints wall-clock start/end timestamps and the elapsed delta; no
    assertions — this is a manual benchmark.
    """
    source = InputLayer("fake_input", 784)
    hidden = HiddenLayer("h1", 784, activation.tanhFunc)
    hidden.set_input_layer(source)
    hidden.init()

    # Random activations in [-1, 1) as fake input.
    source.feed(np.random.uniform(-1, 1, 784))

    started = datetime.now()
    print("[%s] begin to active." % (str(started)))
    for _ in range(1000):
        hidden.active()
    finished = datetime.now()
    elapsed = finished - started
    print("[%s] end of activation, delta=%s." % (str(finished), str(elapsed)))
    return
def test_active():
    """Time 1000 forward passes of a ConvLayer on a 28x28 input.

    Prints wall-clock start/end timestamps and the elapsed delta; no
    assertions — this is a manual benchmark.
    """
    source = InputLayer("fake_input", 784)
    conv = ConvLayer("conv1")
    conv.set_kernels(get_kernels())
    conv.set_input_layer(source)

    # Random 28x28 image (single channel) as fake input.
    image = np.random.uniform(-1, 1, 784).reshape((1, 28, 28))
    source.feed(image)
    conv.init()

    started = datetime.now()
    print("[%s] begin to active." % (str(started)))
    for _ in range(1000):
        conv.active()
    finished = datetime.now()
    elapsed = finished - started
    print("[%s] end of activation, delta=%s." % (str(finished), str(elapsed)))
    return