Example 1
import numpy
import chainer
import chainer.functions as F
import chainer.optimizers as Opt
from libdnn import Classifier


# multi-layer perceptron
model = chainer.FunctionSet(l1=F.Linear(2, 100),
                            l2=F.Linear(100, 100),
                            l3=F.Linear(100, 2))


# define the forward pass
def forward(self, x, train):
    h = F.relu(self.model.l1(x))
    h = F.relu(self.model.l2(h))
    y = self.model.l3(h)

    return y

mlp = Classifier(model, gpu=-1)
mlp.set_forward(forward)
mlp.set_optimizer(Opt.AdaDelta, {'rho': 0.9})

arr = []
t = []
for i in range(10000):
    x, y = (numpy.random.rand() - 0.5), (numpy.random.rand() - 0.5)
    arr.append(numpy.array([x, y]))
    if (x < 0. and y < 0.) or (x > 0. and y > 0.):
        t.append(0)
    else:
        t.append(1)

print(mlp.train(numpy.array(arr).astype(numpy.float32), numpy.array(t).astype(numpy.int32)))
print(mlp.test(numpy.array(arr).astype(numpy.float32), numpy.array(t).astype(numpy.int32)))
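
# a minimal sketch (assumption): the same Classifier can also be trained for
# several epochs with an explicit mini-batch size, assuming train()/test()
# accept a batchsize keyword and return an (error, accuracy) pair as in the
# CNN examples further down
data = numpy.array(arr).astype(numpy.float32)
label = numpy.array(t).astype(numpy.int32)

for epoch in range(10):
    err, acc = mlp.train(data, label, batchsize=100)
    print('epoch %d : loss=%f accuracy=%f' % (epoch + 1, err, acc))

err, acc = mlp.test(data, label)
print('final : loss=%f accuracy=%f' % (err, acc))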
Example 2
# multi-layer perceptron
model = chainer.FunctionSet(l1=F.Linear(2, 100),
                            l2=F.Linear(100, 100),
                            l3=F.Linear(100, 2))


# define the forward pass
def forward(self, x, train):
    h = F.relu(self.model.l1(x))
    h = F.relu(self.model.l2(h))
    y = self.model.l3(h)

    return y


mlp = Classifier(model, gpu=-1)
mlp.set_forward(forward)
mlp.set_optimizer(Opt.AdaDelta, {'rho': 0.9})

arr = []
t = []
for i in range(10000):
    x, y = (numpy.random.rand() - 0.5), (numpy.random.rand() - 0.5)
    arr.append(numpy.array([x, y]))
    if (x < 0. and y < 0.) or (x > 0. and y > 0.):
        t.append(0)
    else:
        t.append(1)

print(
    mlp.train(
        numpy.array(arr).astype(numpy.float32),
        numpy.array(t).astype(numpy.int32)))
Example 3
import numpy
import chainer
import chainer.functions as F
from sklearn.datasets import fetch_mldata
from libdnn import Classifier


# convolutional network for MNIST: three conv layers + two fully-connected layers
model = chainer.FunctionSet(
    conv1=F.Convolution2D(1, 15, 5),
    bn1=F.BatchNormalization(15),
    conv2=F.Convolution2D(15, 30, 3, pad=1),
    bn2=F.BatchNormalization(30),
    conv3=F.Convolution2D(30, 64, 3, pad=1),
    fl4=F.Linear(2304, 576),
    fl5=F.Linear(576, 10)
)


def forward(self, x, train):
    h = F.max_pooling_2d(F.relu(model.bn1(model.conv1(x))), 2)
    h = F.relu(model.bn2(model.conv2(h)))
    h = F.max_pooling_2d(F.relu(model.conv3(h)), 2)
    h = F.dropout(F.relu(model.fl4(h)), train=False)
    y = model.fl5(h)

    return y


cnn = Classifier(model, gpu=-1)
cnn.set_forward(forward)

mnist = fetch_mldata('MNIST original', data_home='.')
perm = numpy.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(numpy.float32).reshape(70000, 1, 28, 28) / 255
mnist.target = mnist.target.astype(numpy.int32)
train_data = mnist.data[perm][:60000]
train_label = mnist.target[perm][:60000]
test_data = mnist.data[perm][60000:]
test_label = mnist.target[perm][60000:]

for epoch in range(15):
    print('epoch : %d' % (epoch + 1))
    err, acc = cnn.train(train_data, train_label, batchsize=200)
    print(acc, err)
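
# a minimal sketch (assumption): evaluate on the held-out 10,000 images after
# training, assuming Classifier.test returns an (error, accuracy) pair like train
err, acc = cnn.test(test_data, test_label)
print('test : loss=%f accuracy=%f' % (err, acc))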
Example 4
import numpy
import chainer
import chainer.functions as F
from sklearn.datasets import fetch_mldata
from libdnn import Classifier


model = chainer.FunctionSet(conv1=F.Convolution2D(1, 15, 5),
                            bn1=F.BatchNormalization(15),
                            conv2=F.Convolution2D(15, 30, 3, pad=1),
                            bn2=F.BatchNormalization(30),
                            conv3=F.Convolution2D(30, 64, 3, pad=1),
                            fl4=F.Linear(2304, 576),
                            fl5=F.Linear(576, 10))


def forward(self, x, train):
    h = F.max_pooling_2d(F.relu(model.bn1(model.conv1(x))), 2)
    h = F.relu(model.bn2(model.conv2(h)))
    h = F.max_pooling_2d(F.relu(model.conv3(h)), 2)
    h = F.dropout(F.relu(model.fl4(h)), train=False)
    y = model.fl5(h)

    return y


cnn = Classifier(model, gpu=-1)
cnn.set_forward(forward)

mnist = fetch_mldata('MNIST original', data_home='.')
perm = numpy.random.permutation(len(mnist.data))
mnist.data = mnist.data.astype(numpy.float32).reshape(70000, 1, 28, 28) / 255
mnist.target = mnist.target.astype(numpy.int32)
train_data = mnist.data[perm][:60000]
train_label = mnist.target[perm][:60000]
test_data = mnist.data[perm][60000:]
test_label = mnist.target[perm][60000:]

for epoch in range(15):
    print('epoch : %d' % (epoch + 1))
    err, acc = cnn.train(train_data, train_label, batchsize=200)
    print(acc, err)
Example 5
import numpy
import chainer
import chainer.functions as F
from libdnn import Classifier


model = chainer.FunctionSet(conv1=F.Convolution2D(1, 15, 5),
                            bn1=F.BatchNormalization(15),
                            conv2=F.Convolution2D(15, 30, 3, pad=1),
                            bn2=F.BatchNormalization(30),
                            conv3=F.Convolution2D(30, 64, 3, pad=1),
                            fl4=F.Linear(576, 576),
                            fl5=F.Linear(576, 10))


def forward(self, x, train):
    h = F.max_pooling_2d(F.relu(model.bn1(model.conv1(x))), 2)
    h = F.max_pooling_2d(h, 2)
    h = F.max_pooling_2d(F.relu(model.bn2(model.conv2(h))), 2)
    h = F.max_pooling_2d(F.relu(model.conv3(h)), 2)
    h = F.dropout(F.relu(model.fl4(h)), train=True)
    y = model.fl5(h)

    return y


cnn = Classifier(model, gpu=-1)
cnn.set_forward(forward)

arr = []
t = []
for i in range(100):
    x = numpy.random.rand(50, 50)
    x = numpy.array([x])
    arr.append(x)
    a = numpy.random.randint(0, 9)
    t.append(a)

print(
    cnn.train(
        numpy.array(arr).astype(numpy.float32),
        numpy.array(t).astype(numpy.int32)))
Example 6
import numpy
import chainer
import chainer.functions as F
from libdnn import Classifier


model = chainer.FunctionSet(
    conv1=F.Convolution2D(1, 15, 5),
    bn1=F.BatchNormalization(15),
    conv2=F.Convolution2D(15, 30, 3, pad=1),
    bn2=F.BatchNormalization(30),
    conv3=F.Convolution2D(30, 64, 3, pad=1),
    fl4=F.Linear(576, 576),
    fl5=F.Linear(576, 10)
)


def forward(self, x, train):
    h = F.max_pooling_2d(F.relu(model.bn1(model.conv1(x))), 2)
    h = F.max_pooling_2d(h, 2)
    h = F.max_pooling_2d(F.relu(model.bn2(model.conv2(h))), 2)
    h = F.max_pooling_2d(F.relu(model.conv3(h)), 2)
    h = F.dropout(F.relu(model.fl4(h)), train=True)
    y = model.fl5(h)

    return y

cnn = Classifier(model, gpu=-1)
cnn.set_forward(forward)

arr = []
t = []
for i in range(100):
    x = numpy.random.rand(50, 50)
    x = numpy.array([x])
    arr.append(x)
    a = numpy.random.randint(0, 9)
    t.append(a)

print(cnn.train(numpy.array(arr).astype(numpy.float32), numpy.array(t).astype(numpy.int32)))
print(cnn.test(numpy.array(arr).astype(numpy.float32), numpy.array(t).astype(numpy.int32)))
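
# a minimal sketch (assumption): persist the trained weights so they can later be
# restored with load_param(), as in the visualization example below; save_param()
# is assumed here to exist as the counterpart of Classifier.load_param()
cnn.save_param('./cnn.param.npy')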

Example 7
import chainer
import chainer.functions as F
from libdnn import Classifier
import libdnn.visualizer as V  # module alias assumed from the V.Visualizer usage below


model = chainer.FunctionSet(
    conv1=F.Convolution2D(1, 15, 5),
    bn1=F.BatchNormalization(15),
    conv2=F.Convolution2D(15, 30, 3, pad=1),
    bn2=F.BatchNormalization(30),
    conv3=F.Convolution2D(30, 64, 3, pad=1),
    fl4=F.Linear(2304, 576),
    fl5=F.Linear(576, 10)
)


def forward(self, x, train):
    h = F.max_pooling_2d(F.relu(model.bn1(model.conv1(x))), 2)
    h = F.relu(model.bn2(model.conv2(h)))
    h = F.max_pooling_2d(F.relu(model.conv3(h)), 2)
    h = F.dropout(F.relu(model.fl4(h)), train=True)
    y = model.fl5(h)

    return y


cnn = Classifier(model, gpu=-1)
cnn.set_forward(forward)
cnn.load_param('./cnn.param.npy')

imager = V.Visualizer(cnn)
imager.plot_filters('conv1', interpolation=True)
imager.plot_filters('conv2', title=False, interpolation=True)
imager.plot_filters('conv3', title=False, interpolation=True)
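
# a minimal sketch: the filters plotted above come from the convolution weight
# tensors of the FunctionSet; in chainer's 1.x API conv1.W is a numpy array of
# shape (out_channels, in_channels, kernel_h, kernel_w)
print(model.conv1.W.shape)  # expected: (15, 1, 5, 5)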