return pp

if __name__ == "__main__":
    BATCH_SIZE = 128

    mnist = MNIST(batch_class=MyBatch)
    config = dict(some=1, conv=dict(arg1=10))
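    # example pipeline-level config; its values can be read inside the pipeline
    # with the C(...) named expression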
    print()
    print("Start training...")
    t = time()
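    # V('name') and B('component') below are batchflow named expressions:
    # V refers to a pipeline variable, B to a component of the current batch;
    # both are resolved lazily, when the pipeline actually runs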
    train_tp = (Pipeline(config=config)
                .init_variable('model', VGG16)
                .init_variable('loss_history', init_on_each_run=list)
                .init_variable('current_loss', init_on_each_run=0)
                .init_variable('input_tensor_name', 'images')
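                # 'dynamic': the model graph is built on the first batch,
                # when the actual input shapes are known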
                .init_model('dynamic', V('model'), 'conv',
                            config={'session': {'config': tf.ConfigProto(allow_soft_placement=True)},
                                    'loss': 'ce',
                                    'optimizer': {'name': 'Adam', 'use_locking': True},
                                    'inputs': dict(images={'shape': (None, None, 1)}, #'shape': (28, 28, 1), 'transform': 'mip @ 1'},
                                                   #labels={'shape': 10, 'dtype': 'uint8',
                                                   labels={'classes': (10+np.arange(10)).astype('str'),
                                                           'transform': 'ohe', 'name': 'targets'}),
                                    'input_block/inputs': 'images',
                                    'output': dict(ops=['labels', 'accuracy'])})
                .make_digits()
                .train_model('conv', fetches='loss',
                                     feed_dict={V('input_tensor_name'): B('images'),
                                                'labels': B('digits')},
                             save_to=V('current_loss'))
                #.print_variable('current_loss')
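                # illustrative sketch (same pattern as the later examples): a template
                # like this is normally closed, linked to a dataset and run, e.g.
                #     train_pp = (train_tp << mnist.train)
                #     train_pp.run(BATCH_SIZE, shuffle=True, n_epochs=1, prefetch=0)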

if __name__ == "__main__":
    BATCH_SIZE = 64

    mnist = MNIST()

    train_template = (Pipeline(config=dict(model=VGG7))
                .init_variable('model', ResNet18)
                .init_variable('loss_history', init_on_each_run=list)
                .init_variable('current_loss', init_on_each_run=0)
                .init_variable('pred_label', init_on_each_run=list)
                .init_model('dynamic', V('model'), 'conv',
                            config={'inputs': dict(images={'shape': B('image_shape')},
                                                   labels={'classes': 10, 'transform': 'ohe', 'name': 'targets'}),
                                    'input_block/inputs': 'images',
                                    'input_block/filters': 16,
                                    #'body/block/bottleneck': 1,
                                    #'head/units': [100, 100, 10],
Example 3
                            #'input_block/filters': 32,
                            'input_block/inputs': 'images',
                            #'body/filters': [16,32,64,128],
                        })
        #'output': dict(ops=['labels', 'accuracy'])})
        .train_model('conv', fetches='loss',
                     feed_dict={'images': B('images'),
                                'masks': F(make_masks)},
                     #feed_dict={'images': F(make3d_images), #B('images'),
                     #           'masks': F(make3d_masks)},
                     save_to=V('current_loss'))
        .print(V('current_loss'))
        .update_variable('loss_history', V('current_loss'), mode='a'))

    train_pp = (train_template << mnist.train)
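    # `<<` binds the lazy template to a concrete dataset part (mnist.train here),
    # producing a runnable pipeline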
    print("Start training...")
    t = time()
    train_pp.run(BATCH_SIZE,
                 shuffle=True,
                 n_epochs=1,
                 drop_last=False,
                 prefetch=0)
    print("End training", time() - t)

    print()
Example 4
        pass


if __name__ == "__main__":
    BATCH_SIZE = 64

    #mnist = MNIST()
    mnist = CIFAR10()
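    # the model class is taken from the pipeline config below via C('model'),
    # so the architecture can be swapped without editing the template itself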

    train_template = (Pipeline(config=dict(model=MobileNet_v2))
                .init_variable('model', C('model'))
                .init_variable('loss_history', init_on_each_run=list)
                .init_variable('current_loss', init_on_each_run=0)
                .init_model('dynamic', V('model'), 'conv',
                            config={'inputs': dict(images={'shape': B('image_shape')},
                                                   labels={'classes': 10, 'transform': 'ohe', 'name': 'targets'}),
                                    'input_block/inputs': 'images',
                                    #'input_block/filters': 16,
                                    #'body/block/bottleneck': 1,
                                    #'head/units': [100, 100, 10],
                                    #'nothing': F(lambda batch: batch.images.shape[1:]),
Example 5
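# MyModel is a minimal custom TFModel: its _build only registers a constant value
# in the TF losses collection; it is defined here but not used in the snippet below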
class MyModel(TFModel):
    def _build(self, config=None):
        tf.losses.add_loss(1.)
        pass

if __name__ == "__main__":
    BATCH_SIZE = 64

    mnist = MNIST()

    train_template = (Pipeline(config=dict(model=VGG7))
                .init_variable('model', ResNetAttention56)
                .init_variable('loss_history', init_on_each_run=list)
                .init_variable('current_loss', init_on_each_run=0)
                .init_variable('pred_label', init_on_each_run=list)
                .init_model('dynamic', V('model'), 'conv',
                            config={'inputs': dict(images={'shape': B('image_shape')},
                                                   labels={'classes': 10, 'transform': 'ohe', 'name': 'targets'}),
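                                    # B('image_shape') is resolved from the batch when the model is built,
                                    # so the same template fits datasets with different image sizes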
                                    'input_block/inputs': 'images',
                                    'input_block/filters': 16,
                                    #'body/block/bottleneck': 1,
                                    #'head/units': [100, 100, 10],
                                    #'nothing': F(lambda batch: batch.images.shape[1:]),
                                    #'filters': 16, 'width_factor': 1,
                                    #'body': dict(se_block=1, se_factor=4, resnext=1, resnext_factor=4, bottleneck=1),
                                    'output': dict(ops=['accuracy'])})
                #.resize(shape=(64, 64))
                .train_model('conv', fetches='loss',
                                     feed_dict={'images': B('images'),
                                                'labels': B('labels')},
                             save_to=V('current_loss'), use_lock=True)