    def test_CyclicLR(self):
        model1 = Sequential(self.s, model_table = 'Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act = 'softmax', n = 2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path = self.data_dir + 'images.sashdat', task = 'load')

        self.s.table.loadtable(caslib = caslib,
                               casout = {'name': 'eee', 'replace': True},
                               path = path)
        lrs = CyclicLR(self.s, 'eee', 4, 1.0, 0.0000001, 0.01)
        solver = VanillaSolver(lr_scheduler=lrs)
        self.assertTrue(self.sample_syntax['CyclicLR'] == solver)

        optimizer = Optimizer(algorithm = solver, log_level = 3, max_epochs = 4, mini_batch_size = 2)
        r = model1.fit(data = 'eee', inputs = '_image_', target = '_label_', optimizer = optimizer, n_threads=2)
        if r.severity > 0:
            for msg in r.messages:
                print(msg)
        self.assertTrue(r.severity <= 1)
Example 2
    def test_model13b(self):
        model = Sequential(self.s, model_table='simple_cnn')
        model.add(layer=InputLayer(n_channels=1, height=10, width=10))
        model.add(layer=OutputLayer(n=10, full_connect=False))
        self.assertTrue(model.summary.loc[1, 'Number of Parameters'] == (0, 0))

        model1 = Sequential(self.s, model_table='simple_cnn')
        model1.add(layer=InputLayer(n_channels=1, height=10, width=10))
        model1.add(layer=OutputLayer(n=10, full_connect=True))
        self.assertTrue(model1.summary.loc[1, 'Number of Parameters'] == (1000, 10))

        model2 = Sequential(self.s, model_table='Simple_CNN')
        model2.add(layer=InputLayer(n_channels=1, height=10, width=10))
        model2.add(layer=OutputLayer(n=10, full_connect=True, include_bias=False))
        self.assertTrue(model2.summary.loc[1, 'Number of Parameters'] == (1000, 0))

        model3 = Sequential(self.s, model_table='Simple_CNN')
        model3.add(layer=InputLayer(n_channels=1, height=10, width=10))
        model3.add(layer=Conv2d(4, 3))
        model3.add(layer=OutputLayer(n=10))
        self.assertTrue(model3.summary.loc[2, 'Number of Parameters'] == (4000, 10))

        model4 = Sequential(self.s, model_table='Simple_CNN')
        model4.add(layer=InputLayer(n_channels=1, height=10, width=10))
        model4.add(layer=Conv2d(4, 3))
        model4.add(layer=OutputLayer(n=10, full_connect=False))
        self.assertTrue(model4.summary.loc[2, 'Number of Parameters'] == (0, 0))
Example 3
    def test_model1(self):

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', lr=0.001)
        if r.severity > 0:
            for msg in r.messages:
                print(msg)
        self.assertTrue(r.severity <= 1)
        
        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
Example 4
    def test_model12(self):
        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', save_best_weights=True)
        self.assertTrue(r.severity == 0)

        r1 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=3)
        self.assertTrue(r1.severity == 0)

        r2 = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=2, save_best_weights=True)
        self.assertTrue(r2.severity == 0)

        r3 = model1.predict(data='eee', use_best_weights=True)
        self.assertTrue(r3.severity == 0)

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
Example 5
    def test_model18(self):
        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(
                self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path = caslibify(self.s,
                                 path=self.data_dir + 'images.sashdat',
                                 task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={
                                   'name': 'eee',
                                   'replace': True
                               },
                               path=path)

        r = model1.fit(data='eee',
                       inputs='_image_',
                       target='_label_',
                       max_epochs=1)
        self.assertTrue(r.severity == 0)

        model1.save_weights_csv(self.data_dir)
Example 6
    def test_model22(self):
        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        pool1 = Pooling(2)
        model1.add(pool1)
        conv1 = Conv2d(1, 1, act='identity', src_layers=[pool1])
        model1.add(conv1)
        model1.add(Res(act='relu', src_layers=[conv1, pool1]))
        model1.add(Pooling(2))
        model1.add(Dense(2))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
        self.assertTrue(r.severity == 0)

        model1.deploy(self.data_dir, output_format='onnx')
Example 7
    def test_model23(self):
        try:
            import onnx
        except ImportError:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2))
        model1.add(Dense(2))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
        self.assertTrue(r.severity == 0)

        model1.deploy(self.data_dir, output_format='onnx')
Example 8
    def test_model15(self):
        # test RECTIFIER activation for concat layer
        try:
            import onnx
        except ImportError:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        pool1 = Pooling(2)
        model1.add(pool1)
        conv1 = Conv2d(1, 7, src_layers=[pool1])
        conv2 = Conv2d(1, 7, src_layers=[pool1])
        model1.add(conv1)
        model1.add(conv2)
        model1.add(Concat(act='RECTIFIER', src_layers=[conv1, conv2]))
        model1.add(Pooling(2))
        model1.add(Dense(2))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(
                self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s,
                                             path=self.data_dir +
                                             'images.sashdat',
                                             task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={
                                   'name': 'eee',
                                   'replace': True
                               },
                               path=path)

        r = model1.fit(data='eee',
                       inputs='_image_',
                       target='_label_',
                       max_epochs=1)
        self.assertTrue(r.severity == 0)

        import tempfile
        tmp_dir_to_dump = tempfile.gettempdir()

        model1.deploy(tmp_dir_to_dump, output_format='onnx')

        import os
        os.remove(os.path.join(tmp_dir_to_dump, "Simple_CNN1.onnx"))

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib',
                            message_level='error',
                            caslib=caslib)
Example 9
    def test_model13(self):
        # test dropout
        try:
            import onnx
        except ImportError:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(
            Conv2d(8, 7, act='IDENTITY', dropout=0.5, include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2, pool='MEAN', dropout=0.5))
        model1.add(
            Conv2d(8, 7, act='IDENTITY', dropout=0.5, include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Pooling(2, pool='MEAN', dropout=0.5))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(BN(act='relu'))
        model1.add(Dense(16, act='IDENTITY', dropout=0.1))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(
                self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s,
                                             path=self.data_dir +
                                             'images.sashdat',
                                             task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={
                                   'name': 'eee',
                                   'replace': True
                               },
                               path=path)

        r = model1.fit(data='eee',
                       inputs='_image_',
                       target='_label_',
                       max_epochs=2)
        self.assertTrue(r.severity == 0)

        import tempfile
        tmp_dir_to_dump = tempfile.gettempdir()
        model1.deploy(tmp_dir_to_dump, output_format='onnx')

        import os
        os.remove(os.path.join(tmp_dir_to_dump, "Simple_CNN1.onnx"))

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib',
                            message_level='error',
                            caslib=caslib)
Example 10
def downsampling_bottleneck(x, in_depth, out_depth, projection_ratio=4):
    '''
    Defines the down-sampling bottleneck of ENet

    Parameters
    ----------
    x : :class:`Layer`
        Previous layer to this block
    in_depth : int
        Depth of the layer fed into this block
    out_depth : int
        Depth of the output layer of this block
    projection_ratio : int, optional
        Used to calculate the reduced_depth for intermediate convolution layers
        Default: 4

    Returns
    -------
    :class:`Res`
    '''

    reduced_depth = int(in_depth // projection_ratio)

    conv1 = Conv2d(reduced_depth,
                   3,
                   stride=2,
                   padding=1,
                   act='identity',
                   include_bias=False)(x)
    bn1 = BN(act='relu')(conv1)

    conv2 = Conv2d(reduced_depth,
                   3,
                   stride=1,
                   act='identity',
                   include_bias=False)(bn1)
    bn2 = BN(act='relu')(conv2)

    conv3 = Conv2d(out_depth, 1, stride=1, act='identity',
                   include_bias=False)(bn2)
    bn3 = BN(act='relu')(conv3)

    pool1 = Pooling(2, stride=2)(x)
    conv4 = Conv2d(out_depth, 1, stride=1, act='identity',
                   include_bias=False)(pool1)
    bn4 = BN(act='relu')(conv4)

    res = Res()([bn3, bn4])

    return res
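
The sketch below is one possible way to wire this block with DLPy's functional API; the small convolution stem, the CAS session `s`, and the dlpy import paths are assumptions added for illustration, not part of the original helper.

# Hypothetical wiring sketch; `s` is assumed to be an existing swat.CAS
# session and the import paths are assumptions.
from dlpy.layers import Input, Conv2d, BN, OutputLayer
from dlpy.model import Model

inp = Input(n_channels=3, width=512, height=512, scale=1.0 / 255, name='data')
# small stem so the block sees in_depth=16 feature maps
stem = BN(act='relu')(Conv2d(16, 3, stride=2, act='identity',
                             include_bias=False)(inp))
x = downsampling_bottleneck(stem, 16, 64)   # halves spatial size, 16 -> 64 channels
out = OutputLayer(n=2)(x)

model = Model(s, inputs=inp, outputs=out)
model.compile()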
Example 11
    def test_plot_ticks(self):

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(Dense(16))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path, tmp_caslib = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', lr=0.001, max_epochs=5)
        
        # Test default tick_frequency value of 1
        ax = model1.plot_training_history()
        self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs)

        # Test even
        tick_frequency = 2
        ax = model1.plot_training_history(tick_frequency=tick_frequency)
        self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)

        # Test odd
        tick_frequency = 3
        ax = model1.plot_training_history(tick_frequency=tick_frequency)
        self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)

        # Test max
        tick_frequency = model1.n_epochs
        ax = model1.plot_training_history(tick_frequency=tick_frequency)
        self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs // tick_frequency + 1)
        
        # Test 0 
        tick_frequency = 0
        ax = model1.plot_training_history(tick_frequency=tick_frequency)
        self.assertEqual(len(ax.xaxis.majorTicks), model1.n_epochs)

        if (caslib is not None) and tmp_caslib:
            self.s.retrieve('table.dropcaslib', message_level = 'error', caslib = caslib)
Example 12
    def test_pool_layer2(self):
        dict1 = Pooling(name='pool2',
                        width=3,
                        height=2,
                        src_layers=[Conv2d(n_filters=3,
                                           name='conv')]).to_model_params()
        self.assertTrue(self.sample_syntax['pool2'] == dict1)
Example 13
    def test_conv2d_layer1(self):
        dict1 = Conv2d(name='convo1',
                       n_filters=10,
                       act='relu',
                       src_layers=[InputLayer(name='input1')
                                   ]).to_model_params()
        self.assertTrue(self.sample_syntax['convo1'] == dict1)
Example 14
    def _conv_block(inputs, filters, alpha, kernel=3, stride=1):
        """
        Adds an initial convolution layer (with batch normalization

        inputs:
            Input tensor
        filters:
            the dimensionality of the output space
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, default number of filters from the paper
                 are used at each layer.
        kernel:
            specifying the width and height of the 2D convolution window.
        strides:
            the strides of the convolution

        """
        filters = int(filters * alpha)
        x = Conv2d(filters,
                   kernel,
                   act='identity',
                   include_bias=False,
                   stride=stride,
                   name='conv1')(inputs)
        x = BN(name='conv1_bn', act='relu')(x)
        return x, filters
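
As a hedged illustration of how this helper might be called, assuming it is accessible as a plain function and that `s` is an existing swat.CAS session (both assumptions, as are the import paths):

# Hypothetical usage sketch; `s` (a swat.CAS session) and the import
# paths are assumptions.
from dlpy.layers import Input, GlobalAveragePooling2D, OutputLayer
from dlpy.model import Model

inp = Input(n_channels=3, width=224, height=224, name='data')
# alpha=0.5 halves the filter count: int(32 * 0.5) = 16 filters
x, n_filters = _conv_block(inp, filters=32, alpha=0.5, kernel=3, stride=2)
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
out = OutputLayer(n=2)(x)

model = Model(s, inputs=inp, outputs=out)
model.compile()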
Example 15
    def test_model22_1(self):
        try:
            import onnx
        except ImportError:
            unittest.TestCase.skipTest(self, "onnx not found in the libraries")
        from onnx import numpy_helper
        import numpy as np

        model1 = Sequential(self.s, model_table='Simple_CNN1')
        model1.add(InputLayer(3, 224, 224))
        model1.add(Conv2d(8, 7, act='identity', include_bias=False))
        model1.add(Reshape(height=448, width=448, depth=2))
        model1.add(Dense(2))
        model1.add(OutputLayer(act='softmax', n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        caslib, path = caslibify(self.s, path=self.data_dir+'images.sashdat', task='load')

        self.s.table.loadtable(caslib=caslib,
                               casout={'name': 'eee', 'replace': True},
                               path=path)

        r = model1.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)
        self.assertTrue(r.severity == 0)

        model1.deploy(self.data_dir_local, output_format='onnx')

        model_path = os.path.join(self.data_dir_local, 'Simple_CNN1.onnx')
        m = onnx.load(model_path)
        self.assertEqual(m.graph.node[1].op_type, 'Reshape')
        init = numpy_helper.to_array(m.graph.initializer[1])
        self.assertTrue(np.array_equal(init, [ -1,  2, 448, 448]))
Example 16
    def test_conv2d_layer_name_conflict(self):
        if __dev__:
            dict1 = Conv2d(n_filters=32, width=5, height=7, name='convo2',
                           stride_horizontal=1, strideHorizontal=10,
                           include_bias=False, includeBias=True)
            bias = dict1.num_bias
            self.assertTrue(bias == 32)
Example 17
    def test_build_gan_model_4(self):

        if self.server_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR_SERVER is not set in the environment variables")

        discriminator = Sequential(self.s)
        discriminator.add(InputLayer(1, 28, 28))
        discriminator.add(Conv2d(3, 3))
        discriminator.add(Pooling(2))
        discriminator.add(Conv2d(3, 3))
        discriminator.add(Pooling(2))
        discriminator.add(Dense(16))
        discriminator.add(OutputLayer(n=1))

        generator = Sequential(self.s)
        generator.add(InputLayer(1, 100, 1))
        generator.add(Dense(256, act='relu'))
        generator.add(Dense(512, act='relu'))
        generator.add(Dense(1024, act='relu'))
        generator.add(Dense(28 * 28, act='tanh'))
        generator.add(OutputLayer(act='softmax', n=2))

        encoder = Sequential(self.s)
        encoder.add(InputLayer(100, 1, 1))
        encoder.add(Dense(256, act='relu'))
        encoder.add(Dense(512, act='relu'))
        encoder.add(Dense(1024, act='relu'))
        encoder.add(Dense(100, act='tanh'))
        encoder.add(OutputLayer(act='softmax', n=2))

        gan_model = GANModel(generator, discriminator, encoder)

        res = gan_model.models['generator'].print_summary()
        print(res)

        res = gan_model.models['discriminator'].print_summary()
        print(res)

        from dlpy.model import Optimizer, MomentumSolver, AdamSolver
        solver = AdamSolver(lr_scheduler=StepLR(learning_rate=0.0001, step_size=4), clip_grad_max=100,
                            clip_grad_min=-100)
        optimizer = Optimizer(algorithm=solver, mini_batch_size=8, log_level=2, max_epochs=4, reg_l2=0.0001)

        res = gan_model.fit(optimizer, optimizer, self.server_dir + 'mnist_validate',
                            n_samples_generator=32, n_samples_discriminator=32, max_iter=2, n_threads=1,
                            damping_factor=0.5)
        print(res)
Example 18
    def test_conv2d_layer2(self):
        dict1 = Conv2d(n_filters=32,
                       width=5,
                       height=7,
                       name='convo2',
                       src_layers=[InputLayer(name='input1')
                                   ]).to_model_params()
        self.assertTrue(self.sample_syntax['convo2'] == dict1)
Example 19
    def test_conv2d_layer_name_format2(self):
        if __dev__:
            dict1 = Conv2d(n_filters=32,
                           width=5,
                           height=7,
                           name='convo2',
                           include_bias=False)
            bias = dict1.num_bias
            self.assertTrue(bias == 0)
Example 20
def upsampling_bottleneck(x, in_depth, out_depth, projection_ratio=4):
    '''
    Defines the up-sampling bottleneck of ENet

    Parameters
    ----------
    x : :class:`Layer`
        Previous layer to this block
    in_depth : int
        Depth of the layer fed into this block
    out_depth : int
        Depth of the output layer of this block
    projection_ratio : int, optional
        Used to calculate the reduced_depth for intermediate convolution layers
        Default: 4

    Returns
    -------
    :class:`BN`
    '''

    reduced_depth = int(in_depth // projection_ratio)

    conv1 = Conv2d(reduced_depth,
                   1,
                   stride=1,
                   act='identity',
                   include_bias=False)(x)
    bn1 = BN(act='relu')(conv1)

    tconv1 = Conv2DTranspose(reduced_depth,
                             3,
                             stride=2,
                             padding=1,
                             output_padding=1,
                             act='identity',
                             include_bias=False)(bn1)
    bn2 = BN(act='relu')(tconv1)

    conv3 = Conv2d(out_depth, 1, stride=1, act='identity',
                   include_bias=False)(bn2)
    bn3 = BN(act='relu')(conv3)

    return bn3
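
As a hedged sketch, this decoder block can be paired with the downsampling_bottleneck helper shown earlier on this page; the convolution stem, the CAS session `s`, and the import paths are assumptions, not part of the original code.

# Hypothetical encoder/decoder sketch; `s` and the import paths are assumptions.
from dlpy.layers import Input, Conv2d, BN, Segmentation
from dlpy.model import Model

inp = Input(n_channels=3, width=256, height=256, scale=1.0 / 255, name='data')
stem = BN(act='relu')(Conv2d(16, 3, act='identity', include_bias=False)(inp))
x = downsampling_bottleneck(stem, 16, 64)   # 256x256 -> 128x128, 16 -> 64 channels
x = upsampling_bottleneck(x, 64, 16)        # 128x128 -> 256x256, 64 -> 16 channels
seg = Segmentation(name='seg')(Conv2d(2, 3, act='relu')(x))

model = Model(s, inputs=inp, outputs=seg)
model.compile()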
Example 21
    def test_model13a(self):
        model = Sequential(self.s, model_table='simple_cnn')
        model.add(InputLayer(3, 224, 224))
        model.add(Conv2d(2, 3))
        model.add(Pooling(2))
        model.add(Dense(4))
        model.add(OutputLayer(n=2))

        if self.data_dir is None:
            unittest.TestCase.skipTest(self, "DLPY_DATA_DIR is not set in the environment variables")

        model.save_to_table(self.data_dir)
Example 22
    def test_imagescaler2(self):
        # test export model with imagescaler
        try:
            import onnx
        except ImportError:
            unittest.TestCase.skipTest(self, 'onnx not found')

        if self.data_dir_local is None:
            unittest.TestCase.skipTest(
                self, 'DLPY_DATA_DIR_LOCAL is not set in '
                'the environment variables')

        model1 = Sequential(self.s, model_table='imagescaler2')
        model1.add(
            InputLayer(n_channels=3,
                       width=224,
                       height=224,
                       scale=1 / 255.,
                       offsets=[0.1, 0.2, 0.3]))
        model1.add(Conv2d(8, 7))
        model1.add(Pooling(2))
        model1.add(OutputLayer(act='softmax', n=2))

        caslib, path = caslibify(self.s,
                                 path=self.data_dir + 'images.sashdat',
                                 task='load')
        self.s.table.loadtable(caslib=caslib,
                               casout={
                                   'name': 'eee',
                                   'replace': True
                               },
                               path=path)
        r = model1.fit(data='eee',
                       inputs='_image_',
                       target='_label_',
                       max_epochs=1)
        self.assertTrue(r.severity == 0)

        from dlpy.model_conversion.write_onnx_model import sas_to_onnx
        onnx_model = sas_to_onnx(model1.layers,
                                 self.s.CASTable('imagescaler2'),
                                 self.s.CASTable('imagescaler2_weights'))

        self.assertAlmostEqual(onnx_model.graph.node[0].attribute[0].floats[0],
                               0.1)
        self.assertAlmostEqual(onnx_model.graph.node[0].attribute[0].floats[1],
                               0.2)
        self.assertAlmostEqual(onnx_model.graph.node[0].attribute[0].floats[2],
                               0.3)
        self.assertAlmostEqual(onnx_model.graph.node[0].attribute[1].f,
                               1 / 255.)
Example 23
    def _depthwise_conv_block(inputs,
                              n_groups,
                              pointwise_conv_filters,
                              alpha,
                              depth_multiplier=1,
                              stride=1,
                              block_id=1):
        """Adds a depthwise convolution block.

        inputs:
            Input tensor
        n_groups : int
            number of groups
        pointwise_conv_filters:
            the dimensionality of the output space
        alpha: controls the width of the network.
            - If `alpha` < 1.0, proportionally decreases the number
                of filters in each layer.
            - If `alpha` > 1.0, proportionally increases the number
                of filters in each layer.
            - If `alpha` = 1, the default number of filters from the paper
                 is used at each layer.
        depth_multiplier:
            The number of depthwise convolution output channels
        stride: An integer specifying the stride
            of the convolution
        block_id: Integer, a unique identifier designating
            the block number.

        """
        pointwise_conv_filters = int(pointwise_conv_filters * alpha)

        x = GroupConv2d(n_groups * depth_multiplier,
                        n_groups,
                        3,
                        stride=stride,
                        act='identity',
                        include_bias=False,
                        name='conv_dw_%d' % block_id)(inputs)
        x = BN(name='conv_dw_%d_bn' % block_id, act='relu')(x)

        x = Conv2d(pointwise_conv_filters,
                   1,
                   act='identity',
                   include_bias=False,
                   stride=1,
                   name='conv_pw_%d' % block_id)(x)
        x = BN(name='conv_pw_%d_bn' % block_id, act='relu')(x)
        return x, pointwise_conv_filters
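
A sketch of one depthwise-separable stage built with this helper follows; the 32-channel stem, the CAS session `s`, and the import paths are assumptions added for illustration.

# Hypothetical usage sketch; `s` and the import paths are assumptions.
from dlpy.layers import Input, Conv2d, BN, GlobalAveragePooling2D, OutputLayer
from dlpy.model import Model

inp = Input(n_channels=3, width=224, height=224, name='data')
# 32-channel stem so the depthwise GroupConv2d can use n_groups=32
x = BN(act='relu')(Conv2d(32, 3, stride=2, act='identity', include_bias=False)(inp))
x, n_filters = _depthwise_conv_block(x, n_groups=32, pointwise_conv_filters=64,
                                     alpha=1, depth_multiplier=1, stride=1,
                                     block_id=1)
x = GlobalAveragePooling2D(name='global_avg_pool')(x)
out = OutputLayer(n=2)(x)

model = Model(s, inputs=inp, outputs=out)
model.compile()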
Example 24
    def setUp(self):
        swat.reset_option()
        swat.options.cas.print_messages = False
        swat.options.interactive_mode = False

        self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)

        if type(self).server_type is None:
            # Set once per class and have every test use it. No need to change between tests.
            type(self).server_type = tm.get_cas_host_type(self.s)

        self.srcLib = tm.get_casout_lib(self.server_type)

        # Define the model
        model = Sequential(self.s, model_table='test_model')
        model.add(InputLayer(3, 224, 224, offsets=(0, 0, 0)))
        model.add(Conv2d(8, 7))
        model.add(Pooling(2))
        model.add(Conv2d(8, 7))
        model.add(Pooling(2))
        model.add(Dense(16))
        model.add(OutputLayer(act='softmax', n=2))

        self.model = model
Example 25
def initial_block(inp):
    '''
    Defines the initial block of ENet

    Parameters
    ----------
    inp : :class:`InputLayer`
        Input layer

    Returns
    -------
    :class:`Concat`
    '''
    x = Conv2d(13, 3, stride=2, padding=1, act='identity',
               include_bias=False)(inp)
    x_bn = BN(act='relu')(x)
    y = Pooling(2)(inp)
    merge = Concat()([x_bn, y])

    return merge
Example 26
    def test_model_crnn_bug(self):
        model = Sequential(self.s, model_table='crnn')
        model.add(InputLayer(3,256,16))
        model.add(Reshape(height=16,width=256,depth=3))

        model.add(Conv2d(64,3,3,stride=1,padding=1))                # size = 16x256x64
        model.add(Pooling(2,2,2))                                   # size = 8x128x64

        model.add(Conv2d(128,3,3,stride=1,padding=1))               # size = 8x128x128
        model.add(Pooling(2,2,2))                                   # size = 4x64x128

        model.add(Conv2d(256,3,3,stride=1,padding=1,act='IDENTITY')) # size = 4x64x256
        model.add(BN(act='RELU'))                   # size = 4x64x256

        model.add(Conv2d(256,3,3,stride=1,padding=1))              # size = 4x64x256

        model.add(Pooling(1,2,stride_horizontal=1, stride_vertical=2))  # size = 2x64x256

        model.add(Conv2d(512,3,3,stride=1,padding=1, act='IDENTITY')) # size = 2x64x512
        model.add(BN(act='RELU'))

        model.add(Conv2d(512,3,3,stride=1,padding=1))              # size = 2x64x512
        model.add(Pooling(1,2,stride_horizontal=1, stride_vertical=2))  # size = 1x64x512

        model.add(Conv2d(512,3,3,stride=1,padding=1, act='IDENTITY')) # size = 1x64x512
        model.add(BN(act='RELU'))

        model.add(Reshape(order='DWH',width=64, height=512, depth=1))

        model.add(Recurrent(512,output_type='SAMELENGTH'))

        model.add(OutputLayer(error='CTC'))

        model.print_summary()
Example 27
def MobileNetV2_ONNX(conn,
                     model_file,
                     n_classes=1000,
                     width=224,
                     height=224,
                     offsets=(255 * 0.406, 255 * 0.456, 255 * 0.485),
                     norm_stds=(255 * 0.225, 255 * 0.224, 255 * 0.229),
                     random_flip=None,
                     random_crop=None,
                     random_mutation=None,
                     include_top=False):
    """
    Generates a deep learning model with the MobileNetV2_ONNX architecture.
    The model architecture and pre-trained weights are generated from a MobileNetV2 ONNX model trained on the ImageNet dataset.
    The model file and the weights file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    To learn more about the model and its pre-processing, see
    https://github.com/onnx/models/tree/master/vision/classification/mobilenet.

    Parameters
    ----------
    conn : CAS
        Specifies the CAS connection object.
    model_file : string
        Specifies the absolute server-side path of the model table file.
        The model table file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    n_classes : int, optional
        Specifies the number of classes.
        Default: 1000
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        The channel order is BGR.
        Default: (255*0.406, 255*0.456, 255*0.485)
    norm_stds : double or iter-of-doubles, optional
        Specifies a standard deviation for each channel in the input data.
        The final input data is normalized with specified means and standard deviations.
        The channel order is BGR.
        Default: (255*0.225, 255*0.224, 255*0.229)
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'
    include_top : bool, optional
        Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers)
        Default: False

    """
    parameters = locals()
    input_parameters = get_layer_options(input_layer_options, parameters)

    # load model and model weights
    model = Model.from_sashdat(conn, path=model_file)
    # check if a user points to a correct model.
    if model.summary.shape[0] != 120:
        raise DLPyError(
            "The model file doesn't point to a valid MobileNetV2_ONNX model. "
            "Please check the SASHDAT file.")
    # extract input layer config
    model_table_df = conn.CASTable(**model.model_table).to_frame()
    input_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 0]
    input_layer = extract_input_layer(input_layer_df)
    input_layer_config = input_layer.config
    # update input layer config
    input_layer_config.update(input_parameters)
    # update the layer list
    model.layers[0] = InputLayer(**input_layer_config,
                                 name=model.layers[0].name)

    # warn if the model weights table doesn't exist
    if not conn.tableexists(model.model_weights.name).exists:
        weights_file_path = os.path.join(os.path.dirname(model_file),
                                         model.model_name + '_weights.sashdat')
        print('WARNING: Model weights are not attached '
              'because the system cannot find a weights file located at {}'.format(
                  weights_file_path))

    if include_top:
        if n_classes != 1000:
            raise DLPyError(
                "If include_top is enabled, n_classes has to be 1000.")
    else:
        # since the output layer is not a fully connected layer,
        # we need to modify the convolution right before the output; its number of filters is set to n_classes.
        conv_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 118]
        conv_layer = extract_conv_layer(conv_layer_df)
        conv_layer_config = conv_layer.config
        # update the conv layer config
        conv_layer_config.update({'n_filters': n_classes})
        # update the layer list
        model.layers[-2] = Conv2d(**conv_layer_config,
                                  name=model.layers[-2].name,
                                  src_layers=model.layers[-3])

        # overwrite n_classes in output layer
        out_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 119]
        out_layer = extract_output_layer(out_layer_df)
        out_layer_config = out_layer.config
        # update the output layer config
        out_layer_config.update({'n': n_classes})
        # update the layer list
        model.layers[-1] = OutputLayer(**out_layer_config,
                                       name=model.layers[-1].name,
                                       src_layers=model.layers[-2])

        # remove top weights
        model.model_weights.append_where('_LayerID_<118')
        model._retrieve_('table.partition',
                         table=model.model_weights,
                         casout=dict(replace=True,
                                     name=model.model_weights.name))
        model.set_weights(model.model_weights.name)
    # recompile the whole network according to the new layer list
    model.compile()
    return model
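
A hedged usage sketch of this builder follows; the host, port, and server-side model path below are placeholders, and the dlpy.applications import path is an assumption.

# Hypothetical usage sketch; host, port, and the model_file path are
# placeholders, and the import path is an assumption.
import swat
from dlpy.applications import MobileNetV2_ONNX

s = swat.CAS('cas-host.example.com', 5570)
model = MobileNetV2_ONNX(s,
                         model_file='/models/MobileNetV2_ONNX.sashdat',
                         n_classes=10,        # rewires the last conv to 10 filters
                         include_top=False)
model.print_summary()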
Example 28
    def _inverted_res_block(inputs, in_channels, expansion, stride, alpha,
                            filters, block_id):
        """
        Inverted Residual Block

        Parameters
        ----------
        inputs:
            Input tensor
        in_channels:
            Specifies the number of channels in the input tensor
        expansion:
            Expansion factor applied to the input size.
        stride:
            The stride of the convolution
        alpha:
            Width multiplier.
        filters:
            The dimensionality of the output space.
        block_id:
            Block ID used for naming layers

        """
        pointwise_conv_filters = int(filters * alpha)
        pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
        x = inputs
        prefix = 'block_{}_'.format(block_id)
        n_groups = in_channels

        if block_id:
            # Expand
            n_groups = expansion * in_channels
            x = Conv2d(expansion * in_channels,
                       1,
                       include_bias=False,
                       act='identity',
                       name=prefix + 'expand')(x)
            x = BN(name=prefix + 'expand_BN', act='identity')(x)
        else:
            prefix = 'expanded_conv_'

        # Depthwise
        x = GroupConv2d(n_groups,
                        n_groups,
                        3,
                        stride=stride,
                        act='identity',
                        include_bias=False,
                        name=prefix + 'depthwise')(x)
        x = BN(name=prefix + 'depthwise_BN', act='relu')(x)

        # Project
        x = Conv2d(pointwise_filters,
                   1,
                   include_bias=False,
                   act='identity',
                   name=prefix + 'project')(x)
        x = BN(name=prefix + 'project_BN',
               act='identity')(x)  # identity activation on narrow tensor

        if in_channels == pointwise_filters and stride == 1:
            return Res(name=prefix + 'add')([inputs, x]), pointwise_filters
        return x, pointwise_filters
Example 29
def MobileNetV2(conn,
                model_table='MobileNetV2',
                n_classes=1000,
                n_channels=3,
                width=224,
                height=224,
                norm_stds=(255 * 0.229, 255 * 0.224, 255 * 0.225),
                offsets=(255 * 0.485, 255 * 0.456, 255 * 0.406),
                random_flip=None,
                random_crop=None,
                random_mutation=None,
                alpha=1):
    '''
    Generates a deep learning model with the MobileNetV2 architecture.
    The implementation is revised based on
    https://github.com/keras-team/keras-applications/blob/master/keras_applications/mobilenet_v2.py

    Parameters
    ----------
    conn : CAS
        Specifies the CAS connection object.
    model_table : string or dict or CAS table, optional
        Specifies the CAS table to store the deep learning model.
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: 1000
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    norm_stds : double or iter-of-doubles, optional
        Specifies a standard deviation for each channel in the input data.
        The final input data is normalized with specified means and standard deviations.
        Default: (255 * 0.229, 255 * 0.224, 255 * 0.225)
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        Default: (255*0.485, 255*0.456, 255*0.406)
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'
    alpha : int, optional
        Specifies the width multiplier in the MobileNet paper
        Default: 1

    Returns
    -------
    :class:`Model`

    References
    ----------
    https://arxiv.org/abs/1801.04381

    '''
    def _make_divisible(v, divisor, min_value=None):
        # make the number of channels divisible by the divisor
        if min_value is None:
            min_value = divisor
        new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_v < 0.9 * v:
            new_v += divisor
        return new_v

    def _inverted_res_block(inputs, in_channels, expansion, stride, alpha,
                            filters, block_id):
        """
        Inverted Residual Block

        Parameters
        ----------
        inputs:
            Input tensor
        in_channels:
            Specifies the number of channels in the input tensor
        expansion:
            Expansion factor applied to the input size.
        stride:
            The stride of the convolution
        alpha:
            Width multiplier.
        filters:
            The dimensionality of the output space.
        block_id:
            Block ID used for naming layers

        """
        pointwise_conv_filters = int(filters * alpha)
        pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
        x = inputs
        prefix = 'block_{}_'.format(block_id)
        n_groups = in_channels

        if block_id:
            # Expand
            n_groups = expansion * in_channels
            x = Conv2d(expansion * in_channels,
                       1,
                       include_bias=False,
                       act='identity',
                       name=prefix + 'expand')(x)
            x = BN(name=prefix + 'expand_BN', act='identity')(x)
        else:
            prefix = 'expanded_conv_'

        # Depthwise
        x = GroupConv2d(n_groups,
                        n_groups,
                        3,
                        stride=stride,
                        act='identity',
                        include_bias=False,
                        name=prefix + 'depthwise')(x)
        x = BN(name=prefix + 'depthwise_BN', act='relu')(x)

        # Project
        x = Conv2d(pointwise_filters,
                   1,
                   include_bias=False,
                   act='identity',
                   name=prefix + 'project')(x)
        x = BN(name=prefix + 'project_BN',
               act='identity')(x)  # identity activation on narrow tensor

        if in_channels == pointwise_filters and stride == 1:
            return Res(name=prefix + 'add')([inputs, x]), pointwise_filters
        return x, pointwise_filters

    parameters = locals()
    input_parameters = get_layer_options(input_layer_options, parameters)
    inp = Input(**input_parameters, name='data')
    # compared with MobileNetV1, V2 introduces an inverted residual structure,
    # and non-linearities in narrow layers are removed.
    # an inverted residual block does three convolutions: the first is a 1x1 convolution,
    # the second is a depthwise convolution, and the third is a 1x1 convolution without any non-linearity
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = Conv2d(first_block_filters,
               3,
               stride=2,
               include_bias=False,
               name='Conv1',
               act='identity')(inp)
    x = BN(name='bn_Conv1', act='relu')(x)

    x, n_channels = _inverted_res_block(x,
                                        first_block_filters,
                                        filters=16,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=1,
                                        block_id=0)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=24,
                                        alpha=alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=1)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=24,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=2)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=32,
                                        alpha=alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=3)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=32,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=4)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=32,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=5)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=64,
                                        alpha=alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=6)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=64,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=7)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=64,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=8)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=64,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=9)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=96,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=10)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=96,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=11)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=96,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=12)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=160,
                                        alpha=alpha,
                                        stride=2,
                                        expansion=6,
                                        block_id=13)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=160,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=14)
    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=160,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=15)

    x, n_channels = _inverted_res_block(x,
                                        n_channels,
                                        filters=320,
                                        alpha=alpha,
                                        stride=1,
                                        expansion=6,
                                        block_id=16)

    # no alpha applied to last conv as stated in the paper:
    # if the width multiplier is greater than 1 we increase the number of output channels
    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    x = Conv2d(last_block_filters,
               1,
               include_bias=False,
               name='Conv_1',
               act='identity')(x)
    x = BN(name='Conv_1_bn', act='relu')(x)

    x = GlobalAveragePooling2D(name="Global_avg_pool")(x)
    x = OutputLayer(n=n_classes)(x)

    model = Model(conn, inp, x, model_table)
    model.compile()

    return model
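
A minimal usage sketch of this builder, with placeholder connection details and an assumed import path:

# Hypothetical usage sketch; connection details are placeholders and the
# import path is an assumption.
import swat
from dlpy.applications import MobileNetV2

s = swat.CAS('cas-host.example.com', 5570)
model = MobileNetV2(s, model_table='MobileNetV2', n_classes=2, alpha=0.5)
model.print_summary()
# training would follow the same pattern as the tests above, e.g.
# model.fit(data='eee', inputs='_image_', target='_label_', max_epochs=1)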
Example 30
def ENet(conn,
         model_table='ENet',
         n_classes=2,
         n_channels=3,
         width=512,
         height=512,
         scale=1.0 / 255,
         norm_stds=None,
         offsets=None,
         random_mutation=None,
         init=None,
         random_flip=None,
         random_crop=None,
         output_image_type=None,
         output_image_prob=False):
    '''
    Generates a deep learning model with the E-Net architecture.

    Parameters
    ----------
    conn : CAS
        Specifies the CAS connection object.
    model_table : string, optional
        Specifies the name of CAS table to store the model.
        Default: ENet
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: 2
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 512
    height : int, optional
        Specifies the height of the input layer.
        Default: 512
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1.0/255
    norm_stds : double or iter-of-doubles, optional
        Specifies a standard deviation for each channel in the input data.
        The final input data is normalized with specified means and standard deviations.
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the
        input layer.
        Valid Values: 'none', 'random'
    init : str
        Specifies the initialization scheme for convolution layers.
        Valid Values: XAVIER, UNIFORM, NORMAL, CAUCHY, XAVIER1, XAVIER2, MSRA, MSRA1, MSRA2
        Default: None
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    output_image_type : string, optional
        Specifies the output image type of this layer.
        Valid Values: WIDE, PNG, BASE64
        Default: WIDE
    output_image_prob : bool, optional
        Specifies whether to include probabilities when doing classification.
        Default: False


    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/abs/1606.02147

    '''
    parameters = locals()
    input_parameters = get_layer_options(input_layer_options, parameters)
    inp = Input(**input_parameters, name='InputLayer_1')

    # initial
    x = initial_block(inp)

    # stage one
    x = downsampling_bottleneck(x, 16, 64)
    for i in range(4):
        x = regular_bottleneck(x, 64, 64)

    # stage two
    x = downsampling_bottleneck(x, 64, 128)
    for i in range(2):
        x = regular_bottleneck(x, 128, 128)
        x = regular_bottleneck(x, 128, 128)

    # stage three
    for i in range(2):
        x = regular_bottleneck(x, 128, 128)
        x = regular_bottleneck(x, 128, 128)

    # stage four
    x = upsampling_bottleneck(x, 128, 64)
    for i in range(2):
        x = regular_bottleneck(x, 64, 64)

    # stage five
    x = upsampling_bottleneck(x, 64, 16)
    x = regular_bottleneck(x, 16, 16)

    x = upsampling_bottleneck(x, 16, 16)
    conv = Conv2d(n_classes, 3, act='relu')(x)

    seg = Segmentation(name='Segmentation_1',
                       output_image_type=output_image_type,
                       output_image_prob=output_image_prob)(conv)

    model = Model(conn, inputs=inp, outputs=seg)
    model.compile()
    return model
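
A minimal usage sketch, again with placeholder connection details and an assumed import path:

# Hypothetical usage sketch; connection details are placeholders and the
# import path is an assumption.
import swat
from dlpy.applications import ENet

s = swat.CAS('cas-host.example.com', 5570)
model = ENet(s, model_table='ENet', n_classes=2, width=512, height=512,
             scale=1.0 / 255)
model.print_summary()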