Example #1
    def train(self, sess):
        model_fn = call_wrap(self.model,
                             self.x_in, {self.dropout_prob: 1.0},
                             sess=sess)
        loss_fn = call_wrap(self.loss, [self.x_in, self.y_in],
                            {self.dropout_prob: 1.0},
                            sess=sess)
        train_fn = call_wrap([self.loss, self.optim, self.gs],
                             {'x': self.x_in,
                              'y': self.y_in,
                              self.dropout_prob: 0.5},
                             sess=sess)

        def update(step, epoch, batch_x, batch_y):
            curr_loss, _, gs = train_fn(x=batch_x, y=batch_y)
            if step % 500 == 0:
                print(('Loss at local step {:d}, global step {:d}, '
                       'local epoch {:d}: {:0.4f}').format(
                           step, gs, epoch, curr_loss))

        for_batches(update, [self.x, self.y], 128, steps=2000)
        print('Final loss: {:0.4f}'.format(
            mean_batches(loss_fn, [self.x, self.y])))
        print('Train accuracy: {:0.2f}%'.format(
            batch_cat_acc(model_fn, self.x, self.y) * 100))

        test_x, test_y = load_mnist(flat=True, test=True)
        print('Test accuracy: {:0.2f}%'.format(
            batch_cat_acc(model_fn, test_x, test_y) * 100))
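The three call_wrap bindings above differ only in how inputs are mapped at call time: a single placeholder yields a one-argument function, a list of placeholders yields positional arguments, and string keys in the feed dict yield keyword arguments. A minimal sketch of the resulting call styles, inferred from how the wrapped functions are used in this example (batch_x and batch_y stand in for NumPy batches):

preds = model_fn(batch_x)                          # single placeholder -> one positional arg
curr_loss = loss_fn(batch_x, batch_y)              # list of placeholders -> positional args
curr_loss, _, gs = train_fn(x=batch_x, y=batch_y)  # string keys -> keyword args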
Example #2
    def build_model(self, gs):
        self.gs = gs
        self.x, self.y = load_mnist(flat=True)
        self.x_in = placeholder_like(self.x, 'x')
        self.y_in = placeholder_like(self.y, 'y', dtype='float32')
        self.dropout_prob = scalar_placeholder()
        self.model = sequential([
            # conv([5, 5], 32),
            # maxpool([2, 2], strides=[2, 2]),
            # conv([5, 5], 64),
            # maxpool([2, 2], strides=[2, 2]),
            # linear(1024),
            # dropout(self.dropout_prob),
            linear(100),
            linear(10, activ=softmax)
        ])(self.x_in)
        self.loss = xentropy(self.model, self.y_in)
        self.optim = adam(lr=0.0001, gs=gs)(self.loss)
        return initialize_vars(execute=False)
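A minimal driver tying build_model and train (Example #1) together, assuming bones runs on top of a TensorFlow 1.x session and that both methods live on one model class; the class name MnistModel is hypothetical, since the surrounding class is not shown in these listings:

import tensorflow as tf

net = MnistModel()  # hypothetical class holding build_model() and train()
init_op = net.build_model(tf.train.get_or_create_global_step())
with tf.Session() as sess:
    sess.run(init_op)  # initialize_vars(execute=False) returned the op unexecuted
    net.train(sess)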
Example #3
from bones.tasks.mnist import load_mnist
from bones import *

x, y = load_mnist(flat=False)
x_test, y_test = load_mnist(flat=False, test=True)
x_in = placeholder_like(x, 'x')
y_in = placeholder_like(y, 'y', dtype='float32')
dropout_prob = scalar_placeholder()
model = sequential([
    conv([5, 5], 192, activ=relu),
    conv([1, 1], 160, activ=relu),
    conv([1, 1], 96, activ=relu),
    maxpool([3, 3], strides=[2, 2]),
    dropout(dropout_prob),
    conv([5, 5], 192, activ=relu),
    conv([1, 1], 192, activ=relu),
    conv([1, 1], 96, activ=relu),
    avgpool([3, 3], strides=[2, 2]),
    dropout(dropout_prob),
    conv([3, 3], 192, activ=relu),
    conv([1, 1], 192, activ=relu),
    conv([1, 1], 10, activ=relu),
    avgpool([8, 8]),
    linear(10, activ=softmax),
])(x_in)
loss = xentropy(model, y_in)
optim = adam(lr=0.001)(loss)
train(model,
      loss,
      optim,
      x_in,
Example #4
from bones.tasks.mnist import load_mnist
from bones import *

x, y = load_mnist(flat=False)
x_in = placeholder_like(x, 'x')
y_in = placeholder_like(y, 'y', dtype='float32')
dropout_prob = scalar_placeholder()
model = sequential([
    conv([5, 5], 32),
    maxpool([2, 2], strides=[2, 2]),
    conv([5, 5], 64),
    maxpool([2, 2], strides=[2, 2]),
    linear(1024),
    dropout(dropout_prob),
    linear(100),
    linear(10, activ=softmax)
])(x_in)
loss = xentropy(model, y_in)
optim = adam(lr=0.0001)(loss)
model_fn = call_wrap(model, x_in, {dropout_prob: 1.0})
loss_fn = call_wrap(loss, [x_in, y_in], {dropout_prob: 1.0})
train_fn = call_wrap([loss, optim], {'x': x_in, 'y': y_in, dropout_prob: 0.5})
initialize_vars()


def update(step, epoch, batch_x, batch_y):
    curr_loss, _ = train_fn(x=batch_x, y=batch_y)
    if step % 500 == 0:
        print('Loss at step {:d}, epoch {:d}: {:0.4f}'.format(
            step, epoch, curr_loss))
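The update callback above is defined but never driven in this snippet; following the pattern of Example #1, for_batches would feed it mini-batches (the batch size of 128 and the step count are illustrative, taken from Example #1):

for_batches(update, [x, y], 128, steps=2000)
print('Final loss: {:0.4f}'.format(mean_batches(loss_fn, [x, y])))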
Example #5
from bones.tasks.mnist import load_mnist
from bones import *

x, y = load_mnist(flat=False)
x_in = placeholder_like(x, 'x')
y_in = placeholder_like(y, 'y', dtype='float32')
dropout_prob = scalar_placeholder()
model = sequential([
    conv([5, 5], 32),
    maxpool([2, 2], strides=[2, 2]),
    conv([5, 5], 64),
    maxpool([2, 2], strides=[2, 2]),
    linear(1024),
    dropout(dropout_prob),
    linear(100),
    linear(10, activ=softmax)
], name='model')(x_in)
loss = xentropy(model, y_in)
optim = adam(lr=0.0001)(loss)
model_fn = call_wrap(model, x_in, {dropout_prob: 1.0})
loss_fn = call_wrap(loss, [x_in, y_in], {dropout_prob: 1.0})
train_fn = call_wrap([loss, optim], {'x': x_in, 'y': y_in, dropout_prob: 0.5})
initialize_vars()
load_vars_json('data/params.json', 'model')

# def update(step, epoch, batch_x, batch_y):
#     curr_loss, _ = train_fn(x=batch_x, y=batch_y)
#     if step % 500 == 0:
#         print('Loss at step {:d}, epoch {:d}: {:0.4f}'
#               .format(step, epoch, curr_loss))
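With the parameters restored from data/params.json and training left commented out, only inference remains; an evaluation step in the style of Example #1 might look like this (the flat=False, test=True combination for load_mnist is an assumption extrapolated from the calls above):

test_x, test_y = load_mnist(flat=False, test=True)
print('Test accuracy: {:0.2f}%'.format(
    batch_cat_acc(model_fn, test_x, test_y) * 100))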