def inference(x, output_num):
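    # two conv -> relu -> max-pool stages feeding a fully connected classifier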
    conv1_out = op.Conv2D(input_variable=x, kernel_shape=(5, 5, 1, 12), name='conv1', padding=0).output_variables
    relu1_out = activation.Relu(input_variable=conv1_out, name='relu1').output_variables
    #dropout1_out = op.DropOut(input_variable=relu1_out, name='dropout1', phase='train', prob=0.7).output_variables
    pool1_out = op.MaxPooling(input_variable=relu1_out, ksize=2, name='pool1').output_variables

    conv2_out = op.Conv2D(input_variable=pool1_out, kernel_shape=(3, 3, 12, 24), name='conv2', padding=1).output_variables
    relu2_out = activation.Relu(input_variable=conv2_out, name='relu2').output_variables
    #dropout2_out = op.DropOut(input_variable=relu2_out, name='dropout2', phase='train', prob=0.7).output_variables
    pool2_out = op.MaxPooling(input_variable=relu2_out, ksize=2, name='pool2').output_variables

    fc_out = op.FullyConnect(input_variable=pool2_out, output_num=output_num, name='fc').output_variables
    return fc_out
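
# A minimal usage sketch, mirroring the MNIST training snippet further down
# (the placeholder shapes and SoftmaxLoss call are taken from that snippet,
# so treat this as illustrative rather than definitive):
#
#   x = var.Variable((64, 28, 28, 1), 'input')
#   y = var.Variable([64, 1], 'label')
#   loss = op.SoftmaxLoss(inference(x, 10), y, 'sf').loss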
Example #2
def inference(x, output_num):
    conv1_out = op.Conv2D((5, 5, 1, 12),
                          input_variable=x,
                          name='conv1',
                          padding='VALID').output_variables
    relu1_out = op.Relu(input_variable=conv1_out,
                        name='relu1').output_variables
    pool1_out = op.MaxPooling(ksize=2, input_variable=relu1_out,
                              name='pool1').output_variables

    conv2_out = op.Conv2D((3, 3, 12, 24),
                          input_variable=pool1_out,
                          name='conv2').output_variables
    relu2_out = op.Relu(input_variable=conv2_out,
                        name='relu2').output_variables
    pool2_out = op.MaxPooling(ksize=2, input_variable=relu2_out,
                              name='pool2').output_variables

    fc_out = op.FullyConnect(output_num=output_num,
                             input_variable=pool2_out,
                             name='fc').output_variables
    return fc_out
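
# Note: this is the same network as Example #1, but with the string padding
# mode 'VALID' instead of numeric padding and with Relu taken from the op
# module rather than the activation module.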
Example #3
from layers.softmax import Softmax

import cv2
import numpy as np

# read a test image and add a leading batch dimension
img = cv2.imread('layers/test.jpg')
img = img[np.newaxis, :]

a = var.Variable((1, 128, 128, 3), 'a')
label = var.Variable([1, 1], 'label')
import random
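# pick a random class id in [1, 9] as a stand-in label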
label.data = np.array([random.randint(1, 9)])
label.data = label.data.astype(int)

conv1_out = op.Conv2D((3, 3, 3, 3),
                      input_variable=a,
                      name='conv1',
                      padding='VALID').output_variables
relu1_out = op.Relu(input_variable=conv1_out, name='relu1').output_variables
pool1_out = op.MaxPooling(ksize=2, input_variable=relu1_out,
                          name='pool1').output_variables
fc1_out = op.FullyConnect(output_num=10, input_variable=pool1_out,
                          name='fc1').output_variables
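# attach a softmax cross-entropy loss on top of the fc logits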
sf_out = op.SoftmaxLoss(predict=fc1_out, label=label, name='sf').loss

new_conv1 = op.GLOBAL_VARIABLE_SCOPE['conv1']
new_fc1 = op.GLOBAL_VARIABLE_SCOPE['fc1']

# The direct layer-class calls below need imports that the snippet does not
# show; the module paths here are an assumption (only layers.softmax is
# imported above):
#   from layers.base_conv import Conv2D
#   from layers.fc import FullyConnect
#   from layers.pooling import MaxPooling
#   from layers.relu import Relu
conv1 = Conv2D([1, 128, 128, 3], 3, 3, 1, method='VALID')
relu1 = Relu(conv1.output_shape)
pool1 = MaxPooling(relu1.output_shape)
fc1 = FullyConnect(pool1.output_shape, 10)

batch_size = 64
global_step = 0
# optimizer setup: give every learnable variable the Adam update rule
for k in var.GLOBAL_VARIABLE_SCOPE:
    s = var.GLOBAL_VARIABLE_SCOPE[k]
    if isinstance(s, var.Variable) and s.learnable:
        s.set_method_adam()

img_placeholder = var.Variable((batch_size, 28, 28, 1), 'input')
label_placeholder = var.Variable([batch_size, 1], 'label')

# set train_op
prediction = inference(img_placeholder, 10)
sf = op.SoftmaxLoss(prediction, label_placeholder, 'sf')


images, labels = load_mnist('./data/mnist')
test_images, test_labels = load_mnist('./data/mnist', 't10k')

# save train curve config
loss_collect = []
acc_collect = []
print('new')
with open('logs/%s_log.txt' % VERSION, 'w') as logf:
    for epoch in range(20):
        # random shuffle
        order = np.arange(images.shape[0])
        np.random.shuffle(order)
        _images = images[order]
Example #5
e = 1e-3
a = var.Variable((1, 128, 128, 3), 'a')
b = var.Variable((1, 128, 128, 3), 'b')

# perturb a single entry in opposite directions for a central-difference check
b.data = a.data.copy()
a.data[0, 0, 0, 1] += e
b.data[0, 0, 0, 1] -= e

# label = var.Variable([1, 1], 'label')
# import random
# label.data = np.array([random.randint(1,9)])
# label.data = label.data.astype(int)

import numpy as np
conv1_out = op.Conv2D((3, 3, 3, 3), input_variable=a, name='conv1', padding='VALID').output_variables
conv2_out = op.Conv2D((3, 3, 3, 3), input_variable=b, name='conv2', padding='VALID').output_variables

conv1 = var.GLOBAL_VARIABLE_SCOPE['conv1']
conv2 = var.GLOBAL_VARIABLE_SCOPE['conv2']
# tie conv1's parameters to conv2's so both branches compute the same function
var.GLOBAL_VARIABLE_SCOPE['conv1'].weights.data = var.GLOBAL_VARIABLE_SCOPE['conv2'].weights.data
var.GLOBAL_VARIABLE_SCOPE['conv1'].bias.data = var.GLOBAL_VARIABLE_SCOPE['conv2'].bias.data

# print(conv1.weights.data - conv2.weights.data)
# print(conv1_out.eval() - conv2_out.eval())


# forward pass, then seed the output gradient with ones to drive backprop
conv1_out.eval()
conv1_out.diff.data = np.ones(conv1_out.diff.shape)
print(a.wait_bp, conv1.wait_forward)
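
# Numeric side of the check, as a hedged sketch: with the weights tied and a
# single entry perturbed by +/- e, the central difference below approximates
# the gradient of sum(output) w.r.t. that entry (the .sum() reduction is an
# assumption about how the comparison was done, not shown in the snippet):
#
#   numeric = (conv1_out.eval() - conv2_out.eval()).sum() / (2 * e)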
import numpy as np

### grad_check

e = 1e-3
a = var.Variable((1, 28, 28, 3), 'a')
b = var.Variable((1, 28, 28, 3), 'b')
c = var.Variable((1, 28, 28, 3), 'c')

# shift every entry of a by +e and of b by -e; c keeps the unperturbed values
b.data = a.data.copy()
c.data = a.data.copy()
a.data += e
b.data -= e

## conv2d
conv1_out = op.Conv2D((3, 3, 3, 3), input_variable=a, name='conv1', stride=1,
                      padding=1).output_variables
conv2_out = op.Conv2D((3, 3, 3, 3), input_variable=b, name='conv2', stride=1,
                      padding=1).output_variables
conv3_out = op.Conv2D((3, 3, 3, 3), input_variable=c, name='conv3', stride=1,
                      padding=1).output_variables

conv1 = var.GLOBAL_VARIABLE_SCOPE['conv1']
conv2 = var.GLOBAL_VARIABLE_SCOPE['conv2']
conv3 = var.GLOBAL_VARIABLE_SCOPE['conv3']
var.GLOBAL_VARIABLE_SCOPE['conv1'].weights.data = var.GLOBAL_VARIABLE_SCOPE[
    'conv2'].weights.data
var.GLOBAL_VARIABLE_SCOPE['conv1'].bias.data = var.GLOBAL_VARIABLE_SCOPE[
    'conv2'].bias.data
var.GLOBAL_VARIABLE_SCOPE['conv3'].weights.data = var.GLOBAL_VARIABLE_SCOPE[
    'conv2'].weights.data
var.GLOBAL_VARIABLE_SCOPE['conv3'].bias.data = var.GLOBAL_VARIABLE_SCOPE[
    'conv2'].bias.data
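
# For reference, a self-contained central-difference gradient check in plain
# numpy, independent of the op/var API above; f and its analytic gradient are
# illustrative stand-ins for a layer's forward/backward pass.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((3, 3))
W = rng.standard_normal((3, 3))
e = 1e-3

def f(x):
    # toy forward pass: f(x) = sum(W * x), so df/dx = W analytically
    return np.sum(W * x)

numeric = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        xp, xm = x.copy(), x.copy()
        xp[i, j] += e
        xm[i, j] -= e
        numeric[i, j] = (f(xp) - f(xm)) / (2 * e)

analytic = W
rel_err = np.abs(numeric - analytic).max() / np.abs(analytic).max()
print('max relative error:', rel_err)  # ~1e-12 for this linear f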