Example #1
def create_basic_model(input, out_dims):

    convolutional_layer_1 = Convolution((5, 5),
                                        16,
                                        init=glorot_uniform(),
                                        activation=relu,
                                        pad=True,
                                        strides=(1, 1))(input)
    pooling_layer_1 = MaxPooling((2, 2), strides=(1, 1))(convolutional_layer_1)

    convolutional_layer_2 = Convolution((5, 5),
                                        16,
                                        init=glorot_uniform(),
                                        activation=relu,
                                        pad=True,
                                        strides=(1, 1))(pooling_layer_1)
    pooling_layer_2 = MaxPooling((3, 3), strides=(2, 2))(convolutional_layer_2)
    #
    convolutional_layer_3 = Convolution((9, 9),
                                        16,
                                        init=glorot_uniform(),
                                        activation=relu,
                                        pad=True,
                                        strides=(1, 1))(pooling_layer_2)
    pooling_layer_3 = MaxPooling((3, 3), strides=(2, 2))(convolutional_layer_3)

    fully_connected_layer = Dense(256, init=glorot_uniform())(pooling_layer_3)
    dropout_layer = Dropout(0.5)(fully_connected_layer)

    output_layer = Dense(out_dims, init=glorot_uniform(),
                         activation=None)(dropout_layer)

    return output_layer
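
A minimal usage sketch for wiring this model into a training criterion (the 3 x 32 x 32 input shape and the 10-class output are assumptions, not part of the example):

import numpy as np
import cntk as C

# Hypothetical input/label variables; adjust shapes to your data.
features = C.input_variable((3, 32, 32), np.float32)
labels = C.input_variable(10, np.float32)

z = create_basic_model(features, 10)

# Standard CNTK criterion nodes.
loss = C.cross_entropy_with_softmax(z, labels)
metric = C.classification_error(z, labels)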
Example #2
def pre_block(input, bnTimeConst):
    # 32 x 32 x 3
    conv1a = conv_bn_relu_layer(input, 32, (3, 3), (1, 1), True, bnTimeConst)
    # 32 x 32 x 32
    conv1b = conv_bn_relu_layer(conv1a, 32, (3, 3), (1, 1), True, bnTimeConst)
    # 32 x 32 x 32
    conv1c = conv_bn_relu_layer(conv1b, 64, (3, 3), (1, 1), True, bnTimeConst)

    c1 = MaxPooling((3, 3), strides=(1, 1), pad=True)(conv1c)
    c2 = conv_bn_relu_layer(conv1c, 96, (3, 3), (1, 1), True, bnTimeConst)

    d = splice(c1, c2, axis=0)

    e1 = conv_bn_relu_layer(d, 64, (1, 1), (1, 1), True, bnTimeConst)
    e2 = conv_bn_relu_layer(e1, 96, (3, 3), (1, 1), True, bnTimeConst)

    f1 = conv_bn_relu_layer(d, 64, (1, 1), (1, 1), True, bnTimeConst)
    f2 = conv_bn_relu_layer(f1, 64, (3, 1), (1, 1), True, bnTimeConst)
    f3 = conv_bn_relu_layer(f2, 64, (1, 3), (1, 1), True, bnTimeConst)
    f4 = conv_bn_relu_layer(f3, 96, (3, 3), (1, 1), True, bnTimeConst)

    g = splice(e2, f4, axis=0)

    h1 = conv_bn_relu_layer(g, 128, (3, 3), (1, 1), True, bnTimeConst)
    i1 = MaxPooling((3, 3), strides=(1, 1), pad=True)(g)

    out = splice(h1, i1, axis=0)

    return out
def pre_block(input):
    # 32 x 32 x 3
    conv1a = conv_bn_relu(input, (3, 3), 16, (1, 1))
    # 32 x 32 x 16
    conv1b = conv_bn_relu(conv1a, (3, 3), 16, (1, 1))
    # 32 x 32 x 16
    conv1c = conv_bn_relu(conv1b, (3, 3), 16, (1, 1))

    c1 = MaxPooling((3, 3), strides=(1, 1), pad=True)(conv1c)
    c2 = conv_bn_relu(conv1c, (3, 3), 16, (1, 1))

    d = splice(c1, c2, axis=0)

    # 32 x 32 x 32
    e1 = conv_bn_relu(d, (1, 1), 32, (1, 1))
    e2 = conv_bn_relu(e1, (3, 3), 32, (1, 1))

    f1 = conv_bn_relu(d, (1, 1), 32, (1, 1))
    f2 = conv_bn_relu(f1, (3, 1), 32, (1, 1))
    f3 = conv_bn_relu(f2, (1, 3), 32, (1, 1))
    f4 = conv_bn_relu(f3, (3, 3), 32, (1, 1))

    g = splice(e2, f4, axis=0)
    # 32 x 32 x 64
    h1 = conv_bn_relu(g, (3, 3), 64, (1, 1))
    i1 = MaxPooling((3, 3), strides=(1, 1), pad=True)(g)

    out = splice(h1, i1, axis=0)
    # 32 x 32 x 128

    return out
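
The channel counts in the comments follow from splice(..., axis=0), which concatenates along the first static axis (the channel axis of a CHW tensor). A toy check, with illustrative shapes:

import cntk as C

a = C.input_variable((16, 32, 32))
b = C.input_variable((16, 32, 32))
c = C.splice(a, b, axis=0)
print(c.shape)  # (32, 32, 32): 16 + 16 channels, spatial size unchanged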
Example #4
    def __call__(self,
                 num_classes=10,
                 act_type=relu,
                 mdl_conv1a_nf=40,
                 mdl_conv1b_nf=60,
                 mdl_conv2a_nf=50,
                 mdl_conv2b_nf=75,
                 mdl_fc1_nh=75,
                 mdl_drop2a_p=0.033,
                 mdl_drop2b_p=0.097,
                 mdl_drop3_p=0.412,
                 **kwargs):

        input_var = input_variable((1, self.img_h, self.img_w), np.float32)
        label_var = input_variable((self.n_dim), np.float32)

        conv1a = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv1a_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv1a')(input_var)
        conv1b = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv1b_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv1b')(conv1a)
        pool1 = MaxPooling(filter_shape=(2, 2), strides=(2, 2), name='pool1')(conv1b)

        conv2a = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv2a_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv2a')(pool1)
        drop2a = Dropout(prob=mdl_drop2a_p, name="drop2a")(conv2a)
        conv2b = Convolution(filter_shape=(3, 3), num_filters=int(mdl_conv2b_nf), activation=act_type, init=glorot_uniform(), pad=True, name='conv2b')(drop2a)
        drop2b = Dropout(prob=mdl_drop2b_p, name="drop2b")(conv2b)
        pool2 = MaxPooling(filter_shape=(2, 2), strides=(2, 2), name='pool2')(drop2b)

        fc1 = Dense(shape=int(mdl_fc1_nh), init=glorot_uniform(), activation=act_type, name='fc1')(pool2)
        drop3 = Dropout(prob=mdl_drop3_p, name="drop3")(fc1)
        #fc2 = Dense(shape=num_classes, init=glorot_uniform(), activation=softmax, name='fc2')(drop3)
        fc2 = Dense(shape=num_classes, init=glorot_uniform(), activation=None, name='fc2')(drop3)

        return input_var, label_var, fc2
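
A hedged sketch of how the returned triple would typically be consumed (model_builder stands in for an instance of the surrounding class, which is not shown here; its img_h, img_w and n_dim attributes are assumed to be set):

import cntk as C

input_var, label_var, fc2 = model_builder(num_classes=10)

loss = C.cross_entropy_with_softmax(fc2, label_var)
metric = C.classification_error(fc2, label_var)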
Example #5
    def create_model(self):

        mean_removed_features = minus(self.input,
                                      constant(114),
                                      name='mean_removed_input')

        with default_options(activation=None, pad=True, bias=True):
            self.model = Sequential([
                Convolution2D((11, 11),
                              96,
                              init=normal(0.01),
                              pad=False,
                              name='conv1'),
                Activation(activation=relu, name='relu1'),
                self.__local_response_normalization(1.0,
                                                    2,
                                                    0.0001,
                                                    0.75,
                                                    name='norm1'),
                MaxPooling((3, 3), (2, 2), name='pool1'),
                Convolution2D((5, 5),
                              192,
                              init=normal(0.01),
                              init_bias=0.1,
                              name='conv2'),
                Activation(activation=relu, name='relu2'),
                self.__local_response_normalization(1.0,
                                                    2,
                                                    0.0001,
                                                    0.75,
                                                    name='norm2'),
                MaxPooling((3, 3), (2, 2), name='pool2'),
                Convolution2D((3, 3), 384, init=normal(0.01), name='conv3'),
                Activation(activation=relu, name='relu3'),
                Convolution2D((3, 3),
                              384,
                              init=normal(0.01),
                              init_bias=0.1,
                              name='conv4'),
                Activation(activation=relu, name='relu4'),
                Convolution2D((3, 3),
                              256,
                              init=normal(0.01),
                              init_bias=0.1,
                              name='conv5'),
                Activation(activation=relu, name='relu5'),
                MaxPooling((3, 3), (2, 2), name='pool5'),
                Dense(4096, init=normal(0.005), init_bias=0.1, name='fc6'),
                Activation(activation=relu, name='relu6'),
                Dropout(0.5, name='drop6'),
                Dense(4096, init=normal(0.005), init_bias=0.1, name='fc7'),
                Activation(activation=relu, name='relu7'),
                Dropout(0.5, name='drop7'),
                Dense(self.number_labels, init=normal(0.01), name='fc8')
            ])(mean_removed_features)
Example #6
def create_alexnet():

    # Input variables denoting the features and label data
    feature_var = input_variable((num_channels, image_height, image_width))
    label_var = input_variable((num_classes))

    # apply model to input
    # remove mean value
    input = minus(feature_var, constant(114), name='mean_removed_input')

    with default_options(activation=None, pad=True, bias=True):
        z = Sequential([
            # we separate Convolution and ReLU to name the output for feature extraction (usually before ReLU)
            Convolution2D((11,11), 96, init=normal(0.01), pad=False, strides=(4,4), name='conv1'),
            Activation(activation=relu, name='relu1'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm1'),
            MaxPooling((3,3), (2,2), name='pool1'),

            Convolution2D((5,5), 192, init=normal(0.01), init_bias=0.1, name='conv2'),
            Activation(activation=relu, name='relu2'),
            LocalResponseNormalization(1.0, 2, 0.0001, 0.75, name='norm2'),
            MaxPooling((3,3), (2,2), name='pool2'),

            Convolution2D((3,3), 384, init=normal(0.01), name='conv3'),
            Activation(activation=relu, name='relu3'),
            Convolution2D((3,3), 384, init=normal(0.01), init_bias=0.1, name='conv4'),
            Activation(activation=relu, name='relu4'),
            Convolution2D((3,3), 256, init=normal(0.01), init_bias=0.1, name='conv5'),
            Activation(activation=relu, name='relu5'),
            MaxPooling((3,3), (2,2), name='pool5'),

            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc6'),
            Activation(activation=relu, name='relu6'),
            Dropout(0.5, name='drop6'),
            Dense(4096, init=normal(0.005), init_bias=0.1, name='fc7'),
            Activation(activation=relu, name='relu7'),
            Dropout(0.5, name='drop7'),
            Dense(num_classes, init=normal(0.01), name='fc8')
            ])(input)

    # loss and metric
    ce  = cross_entropy_with_softmax(z, label_var)
    pe  = classification_error(z, label_var)
    pe5 = classification_error(z, label_var, topN=5)

    log_number_of_parameters(z) ; print()

    return {
        'feature': feature_var,
        'label': label_var,
        'ce' : ce,
        'pe' : pe,
        'pe5': pe5,
        'output': z
    }
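
A possible way to consume the returned dictionary when building a trainer (the learning rate is an arbitrary placeholder):

import cntk as C

network = create_alexnet()
z = network['output']

lr = C.learning_rate_schedule(0.01, C.UnitType.minibatch)  # placeholder value
learner = C.sgd(z.parameters, lr)
trainer = C.Trainer(z, (network['ce'], network['pe']), [learner])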
def bn_inception_model(input, labelDim, bnTimeConst):

    # 224 x 224 x 3
    conv1 = conv_bn_relu_layer(input, 64, (7, 7), (2, 2), True, bnTimeConst)
    # 112 x 112 x 64
    pool1 = MaxPooling(filter_shape=(3, 3), strides=(2, 2), pad=True)(conv1)
    # 56 x 56 x 64
    conv2a = conv_bn_relu_layer(pool1, 64, (1, 1), (1, 1), True, bnTimeConst)
    # 56 x 56 x 64
    conv2b = conv_bn_relu_layer(conv2a, 192, (3, 3), (1, 1), True, bnTimeConst)
    # 56 x 56 x 192
    pool2 = MaxPooling(filter_shape=(3, 3), strides=(2, 2), pad=True)(conv2b)

    # Inception Blocks
    # 28 x 28 x 192
    inception3a = inception_block_with_avgpool(pool2, 64, 64, 64, 64, 96, 32,
                                               bnTimeConst)
    # 28 x 28 x 256
    inception3b = inception_block_with_avgpool(inception3a, 64, 64, 96, 64, 96,
                                               64, bnTimeConst)
    # 28 x 28 x 320
    inception3c = inception_block_pass_through(inception3b, 0, 128, 160, 64,
                                               96, 0, bnTimeConst)
    # 14 x 14 x 576
    inception4a = inception_block_with_avgpool(inception3c, 224, 64, 96, 96,
                                               128, 128, bnTimeConst)
    # 14 x 14 x 576
    inception4b = inception_block_with_avgpool(inception4a, 192, 96, 128, 96,
                                               128, 128, bnTimeConst)
    # 14 x 14 x 576
    inception4c = inception_block_with_avgpool(inception4b, 160, 128, 160, 128,
                                               160, 128, bnTimeConst)
    # 14 x 14 x 576
    inception4d = inception_block_with_avgpool(inception4c, 96, 128, 192, 160,
                                               192, 128, bnTimeConst)
    # 14 x 14 x 576
    inception4e = inception_block_pass_through(inception4d, 0, 128, 192, 192,
                                               256, 0, bnTimeConst)
    # 7 x 7 x 1024
    inception5a = inception_block_with_avgpool(inception4e, 352, 192, 320, 160,
                                               224, 128, bnTimeConst)
    # 7 x 7 x 1024
    inception5b = inception_block_with_maxpool(inception5a, 352, 192, 320, 192,
                                               224, 128, bnTimeConst)

    # Global Average
    # 7 x 7 x 1024
    pool3 = AveragePooling(filter_shape=(7, 7))(inception5b)
    # 1 x 1 x 1024
    z = Dense(labelDim, init=he_normal())(pool3)

    return z
Example #8
def inceptionv1_cifar_model2(input, labelDim, bnTimeConst):

    # 32 x 32 x 3
    conv1 = conv_bn_relu_layer(input, 32, (3, 3), (1, 1), True, bnTimeConst)
    # 32 x 32 x 32
    conv2 = conv_bn_relu_layer(conv1, 32, (3, 3), (1, 1), True, bnTimeConst)

    # Inception Blocks
    # 32 x 32 x 64
    inception3a = inception_block_with_maxpool(conv2, 32, 32, 32, 32, 48, 16,
                                               bnTimeConst)
    # 32 x 32 x 128
    inception3b = inception_block_with_maxpool(inception3a, 32, 32, 32, 32, 48,
                                               16, bnTimeConst)

    maxpool1 = MaxPooling((3, 3), strides=(2, 2), pad=True)(inception3b)

    # 16 x 16 x 128
    inception4a = inception_block_with_maxpool(maxpool1, 96, 48, 64, 48, 64,
                                               64, bnTimeConst)
    # 16 x 16 x 288
    inception4b = inception_block_with_maxpool(inception4a, 96, 48, 64, 48, 64,
                                               64, bnTimeConst)
    # 16 x 16 x 288
    inception4c = inception_block_with_maxpool(inception4b, 96, 48, 64, 48, 64,
                                               64, bnTimeConst)
    # 16 x 16 x 288
    inception4d = inception_block_with_maxpool(inception4c, 96, 48, 64, 48, 64,
                                               64, bnTimeConst)
    # 16 x 16 x 288
    inception4e = inception_block_with_maxpool(inception4d, 96, 48, 64, 48, 64,
                                               64, bnTimeConst)

    maxpool2 = MaxPooling((3, 3), strides=(2, 2), pad=True)(inception4e)

    # 8 x 8 x 288
    inception5a = inception_block_with_maxpool(maxpool2, 176, 96, 160, 96,
                                               112, 64, bnTimeConst)
    # 8 x 8 x 512
    inception5b = inception_block_with_maxpool(inception5a, 176, 96, 160, 96,
                                               112, 64, bnTimeConst)

    # Global Average
    # 8 x 8 x 512
    pool1 = AveragePooling(filter_shape=(8, 8))(inception5b)
    # 1 x 1 x 512

    z = Dense(labelDim, init=he_normal())(pool1)

    return z
    def create_convolutional_neural_network(input_vars, out_dims):

        convolutional_layer_1 = Convolution((5, 5),
                                            32,
                                            strides=1,
                                            activation=cntk.ops.relu,
                                            pad=True,
                                            init=glorot_normal(),
                                            init_bias=0.1)
        pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_2 = Convolution((5, 5),
                                            64,
                                            strides=1,
                                            activation=cntk.ops.relu,
                                            pad=True,
                                            init=glorot_normal(),
                                            init_bias=0.1)
        pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        convolutional_layer_3 = Convolution((5, 5),
                                            128,
                                            strides=1,
                                            activation=cntk.ops.relu,
                                            pad=True,
                                            init=glorot_normal(),
                                            init_bias=0.1)
        pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)

        fully_connected_layer = Dense(1024,
                                      activation=cntk.ops.relu,
                                      init=glorot_normal(),
                                      init_bias=0.1)

        output_layer = Dense(out_dims,
                             activation=None,
                             init=glorot_normal(),
                             init_bias=0.1)

        model = Sequential([
            convolutional_layer_1,
            pooling_layer_1,
            convolutional_layer_2,
            pooling_layer_2,
            #convolutional_layer_3, pooling_layer_3,
            fully_connected_layer,
            output_layer
        ])(input_vars)
        return model
Example #10
def create_imagenet_model_bottleneck(input, num_stack_layers, num_classes,
                                     stride1x1, stride3x3):
    c_map = [64, 128, 256, 512, 1024, 2048]

    # conv1 and max pooling
    conv1 = conv_bn_relu(input, (7, 7), c_map[0], strides=(2, 2))
    pool1 = MaxPooling((3, 3), strides=(2, 2), pad=True)(conv1)

    # conv2_x
    r2_1 = resnet_bottleneck_inc(pool1, c_map[2], c_map[0], (1, 1), (1, 1))
    r2_2 = resnet_bottleneck_stack(r2_1, num_stack_layers[0], c_map[2],
                                   c_map[0])

    # conv3_x
    r3_1 = resnet_bottleneck_inc(r2_2, c_map[3], c_map[1], stride1x1,
                                 stride3x3)
    r3_2 = resnet_bottleneck_stack(r3_1, num_stack_layers[1], c_map[3],
                                   c_map[1])

    # conv4_x
    r4_1 = resnet_bottleneck_inc(r3_2, c_map[4], c_map[2], stride1x1,
                                 stride3x3)
    r4_2 = resnet_bottleneck_stack(r4_1, num_stack_layers[2], c_map[4],
                                   c_map[2])

    # conv5_x
    r5_1 = resnet_bottleneck_inc(r4_2, c_map[5], c_map[3], stride1x1,
                                 stride3x3)
    r5_2 = resnet_bottleneck_stack(r5_1, num_stack_layers[3], c_map[5],
                                   c_map[3])

    # Global average pooling and output
    pool = AveragePooling(filter_shape=(7, 7), name='final_avg_pooling')(r5_2)
    z = Dense(num_classes, init=C.normal(0.01))(pool)
    return z
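
A hedged call sketch: with the convention that resnet_bottleneck_inc supplies the first block of each stage, stack counts of [2, 3, 5, 2] yield a ResNet-50-style depth (3 + 4 + 6 + 3 bottleneck blocks in total). The input shape, class count and stride arguments below are illustrative:

import numpy as np
import cntk as C

features = C.input_variable((3, 224, 224), np.float32)
z = create_imagenet_model_bottleneck(features, [2, 3, 5, 2], 1000,
                                     stride1x1=(1, 1), stride3x3=(2, 2))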
def create_convolutional_neural_network(input_vars, out_dims, dropout_prob=0.0):

    convolutional_layer_1 = Convolution((5, 5), 32, strides=1, activation=cntk.ops.relu, pad=True, init=gaussian(), init_bias=0.1)(input_vars)
    pooling_layer_1 = MaxPooling((2, 2), strides=(2, 2), pad=True)(convolutional_layer_1)

    convolutional_layer_2 = Convolution((5, 5), 64, strides=1, activation=cntk.ops.relu, pad=True, init=gaussian(), init_bias=0.1)(pooling_layer_1)
    pooling_layer_2 = MaxPooling((2, 2), strides=(2, 2), pad=True)(convolutional_layer_2)

    convolutional_layer_3 = Convolution((5, 5), 128, strides=1, activation=cntk.ops.relu, pad=True, init=gaussian(), init_bias=0.1)(pooling_layer_2)
    pooling_layer_3 = MaxPooling((2, 2), strides=(2, 2), pad=True)(convolutional_layer_3)

    fully_connected_layer = Dense(1024, activation=cntk.ops.relu, init=gaussian(), init_bias=0.1)(pooling_layer_3)
    dropout_layer = Dropout(dropout_prob)(fully_connected_layer)
    output_layer = Dense(out_dims, activation=None, init=gaussian(), init_bias=0.1)(dropout_layer)

    return output_layer
Example #12
def test_layers_conv_pool_unpool_deconv():
    inC, inH, inW = 1,4,4

    y = input((inC,inH, inW))

    cMap = 1

    zero_pad = True
    conv_init = 1
    filter_shape = (2,2)
    pooling_strides = (2,2)

    dat = np.arange(0,16, dtype=np.float32).reshape(1,1,4,4)

    conv = Convolution(filter_shape, cMap, pad=zero_pad, init=conv_init,activation=None)(y)

    pool = MaxPooling(filter_shape, pooling_strides)(conv)

    unpool = MaxUnpooling(filter_shape, pooling_strides)(pool, conv)

    z = ConvolutionTranspose(filter_shape, cMap, init=conv_init, pad=zero_pad)(unpool)

    assert z.shape == y.shape

    res = z(dat)

    expected_res = np.asarray([[30, 64, 34], [76, 160, 84], [46, 96, 50]], np.float32)

    np.testing.assert_array_almost_equal(res[0][0][1:,1:], expected_res, decimal=6,
        err_msg="Wrong values in conv/pooling/unpooling/conv_transposed")
def test_depth_first_search_blocks(depth, prefix_count):
    from cntk.layers import Sequential, Convolution, MaxPooling, Dense
    from cntk.default_options import default_options

    def Blocked_Dense(dim, activation=None):
        dense = Dense(dim, activation=activation)

        @C.layers.BlockFunction('blocked_dense', 'blocked_dense')
        def func(x):
            return dense(x)

        return func

    with default_options(activation=C.relu):
        image_to_vec = Sequential([
            Convolution((5, 5), 32, pad=True),
            MaxPooling((3, 3), strides=(2, 2)),
            Dense(10, activation=None),
            Blocked_Dense(10)
        ])

    in1 = C.input_variable(shape=(3, 256, 256), name='image')
    img = image_to_vec(in1)

    found = C.logging.graph.depth_first_search(img,
                                               lambda x: True,
                                               depth=depth)
    found_str = [str(v) for v in found]

    assert len(found) == sum(prefix_count.values())
    for prefix, count in prefix_count.items():
        assert sum(f.startswith(prefix) for f in found_str) == count
Example #14
def inception_block_with_maxpool(input, num1x1, num3x3r, num3x3, num3x3dblr,
                                 num3x3dbl, numPool, bnTimeConst):

    # 1x1
    branch1x1 = conv_bn_relu_layer(input, num1x1, (1, 1), (1, 1), True,
                                   bnTimeConst)

    # 1x1 -> 3x3
    branch3x3_reduce = conv_bn_relu_layer(input, num3x3r, (1, 1), (1, 1), True,
                                          bnTimeConst)
    branch3x3 = conv_bn_relu_layer(branch3x3_reduce, num3x3, (3, 3), (1, 1),
                                   True, bnTimeConst)

    # 1x1 -> 3x3 -> 3x3
    branch3x3dbl_reduce = conv_bn_relu_layer(input, num3x3dblr, (1, 1), (1, 1),
                                             True, bnTimeConst)
    branch3x3dbl_conv = conv_bn_relu_layer(branch3x3dbl_reduce, num3x3dbl,
                                           (3, 3), (1, 1), True, bnTimeConst)
    branch3x3dbl = conv_bn_relu_layer(branch3x3dbl_conv, num3x3dbl, (3, 3),
                                      (1, 1), True, bnTimeConst)

    # max pooling -> 1x1
    branchPool_maxpool = MaxPooling((3, 3), strides=(1, 1), pad=True)(input)
    branchPool = conv_bn_relu_layer(branchPool_maxpool, numPool, (1, 1),
                                    (1, 1), True, bnTimeConst)

    out = splice(branch1x1, branch3x3, branch3x3dbl, branchPool, axis=0)

    return out
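
Since splice(..., axis=0) concatenates the four branches along the channel axis, the block's output has num1x1 + num3x3 + num3x3dbl + numPool channels. A hypothetical helper that makes this explicit (the numbers check against the inception3a call in Example #8, whose comment reports 128 output channels):

def inception_maxpool_out_channels(num1x1, num3x3, num3x3dbl, numPool):
    # Channel count of the spliced output; the reduce (1x1) filters do not
    # appear because they only feed the 3x3 branches.
    return num1x1 + num3x3 + num3x3dbl + numPool

print(inception_maxpool_out_channels(32, 32, 48, 16))  # 128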
Example #15
def create_shallow_model(input, out_dims):
    
    convolutional_layer_1_1  = Convolution((7,7), 32, init=glorot_uniform(), activation=relu, pad=True, strides=(1,1))(input)
    convolutional_layer_1_2  = Convolution((25,25), 32, init=glorot_uniform(), activation=relu, pad=True, strides=(1,1))(convolutional_layer_1_1)

    pooling_layer_1  = MaxPooling((25,25), strides=(5,5))(convolutional_layer_1_2 )
    
    convolutional_layer_2_1 = Convolution((3,3), 32, init=glorot_uniform(), activation=relu, pad=True, strides=(1,1))(pooling_layer_1)
    pooling_layer_2 = MaxPooling((2,2), strides=(2,2))(convolutional_layer_2_1)
 
    fully_connected_layer_1  = Dense(512, init=glorot_uniform())(pooling_layer_2)   
    fully_connected_layer_2  = Dense(128, init=glorot_uniform())(fully_connected_layer_1)
    dropout_layer = Dropout(0.5)(fully_connected_layer_2)

    output_layer = Dense(out_dims, init=glorot_uniform(), activation=None)(dropout_layer)
    
    return output_layer
Example #16
def create_vgg16(feature_var, num_classes, dropout=0.9):

    with default_options(activation=None, pad=True, bias=True):
        z = Sequential([
            # we separate Convolution and ReLU to name the output for feature
            # extraction (usually before ReLU)
            For(
                range(2), lambda i: [
                    Convolution2D((3, 3), 64, name='conv1_{}'.format(i)),
                    Activation(activation=relu, name='relu1_{}'.format(i)),
                ]),
            MaxPooling((2, 2), (2, 2), name='pool1'),
            For(
                range(2), lambda i: [
                    Convolution2D((3, 3), 128, name='conv2_{}'.format(i)),
                    Activation(activation=relu, name='relu2_{}'.format(i)),
                ]),
            MaxPooling((2, 2), (2, 2), name='pool2'),
            For(
                range(3), lambda i: [
                    Convolution2D((3, 3), 256, name='conv3_{}'.format(i)),
                    Activation(activation=relu, name='relu3_{}'.format(i)),
                ]),
            MaxPooling((2, 2), (2, 2), name='pool3'),
            For(
                range(3), lambda i: [
                    Convolution2D((3, 3), 512, name='conv4_{}'.format(i)),
                    Activation(activation=relu, name='relu4_{}'.format(i)),
                ]),
            MaxPooling((2, 2), (2, 2), name='pool4'),
            For(
                range(3), lambda i: [
                    Convolution2D((3, 3), 512, name='conv5_{}'.format(i)),
                    Activation(activation=relu, name='relu5_{}'.format(i)),
                ]),
            MaxPooling((2, 2), (2, 2), name='pool5'),
            Dense(4096, name='fc6'),
            Activation(activation=relu, name='relu6'),
            Dropout(dropout, name='drop6'),
            Dense(4096, name='fc7'),
            Activation(activation=relu, name='relu7'),
            Dropout(dropout, name='drop7'),
            Dense(num_classes, name='fc8')
        ])(feature_var)

    return z
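
Illustrative use of the VGG16 factory, evaluating the untrained network on a random batch (the 224 x 224 RGB input and 1000 classes are assumptions taken from the usual ImageNet setting):

import numpy as np
import cntk as C

feature_var = C.input_variable((3, 224, 224), np.float32)
z = create_vgg16(feature_var, 1000)

batch = np.random.rand(2, 3, 224, 224).astype(np.float32)  # two random images
out = z.eval({feature_var: batch})
print(out.shape)  # (2, 1000)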
def pre_block(input):
    c1 = conv_bn_relu(input, (3,3), 32, (1,1))
    c2 = conv_bn_relu(c1, (3,3), 32, (1,1))
    c3 = conv_bn_relu(c2, (3,3), 64, (1,1))
    c4 = MaxPooling(filter_shape = (3,3), strides = (1,1), pad = True, name = 'pool')(c3)

    c5 = conv_bn_relu(c4, (1,1), 80, (1,1))
    c6 = conv_bn_relu(c5, (3,3), 128, (1,1))
    c7 = conv_bn_relu(c6, (3,3), 128, (1,1))

    return c7
def create_symbol():
    # Weights initialised from a Glorot uniform distribution
    # Default activation is relu; the output layer overrides it with None
    with cntk.layers.default_options(init=cntk.glorot_uniform(),
                                     activation=cntk.relu):
        x = Convolution2D(filter_shape=(3, 3), num_filters=50,
                          pad=True)(features)
        x = Convolution2D(filter_shape=(3, 3), num_filters=50, pad=True)(x)
        x = MaxPooling((2, 2), strides=(2, 2), pad=False)(x)
        x = Dropout(0.25)(x)

        x = Convolution2D(filter_shape=(3, 3), num_filters=100, pad=True)(x)
        x = Convolution2D(filter_shape=(3, 3), num_filters=100, pad=True)(x)
        x = MaxPooling((2, 2), strides=(2, 2), pad=False)(x)
        x = Dropout(0.25)(x)

        x = Dense(512)(x)
        x = Dropout(0.5)(x)
        x = Dense(N_CLASSES, activation=None)(x)
        return x
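
create_symbol() references a module-level features variable and an N_CLASSES constant that are not part of the snippet; a minimal assumed setup could look like this:

import cntk
from cntk.layers import Convolution2D, MaxPooling, Dense, Dropout

N_CLASSES = 10
features = cntk.input_variable((3, 32, 32))  # assumed CIFAR-sized input

sym = create_symbol()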
Example #19
def reduction_A(input, b1, c1, c2, c3, bnTimeConst):
    A1 = MaxPooling((3, 3), strides=(2, 2), pad=True)(input)

    B1 = conv_bn_relu_layer(input, b1, (3, 3), (2, 2), True, bnTimeConst)

    C1 = conv_bn_relu_layer(input, c1, (1, 1), (1, 1), True, bnTimeConst)
    C2 = conv_bn_relu_layer(C1, c2, (3, 3), (1, 1), True, bnTimeConst)
    C3 = conv_bn_relu_layer(C2, c3, (3, 3), (2, 2), True, bnTimeConst)

    out = splice(A1, B1, C3, axis=0)

    return out
def reduction_A(input, b1, c1, c2, c3):
    A1 = MaxPooling((3, 3), strides=(2, 2), pad=True)(input)

    B1 = conv_bn_relu(input, (3, 3), b1, (2, 2))

    C1 = conv_bn_relu(input, (1, 1), c1, (1, 1))
    C2 = conv_bn_relu(C1, (3, 3), c2, (1, 1))
    C3 = conv_bn_relu(C2, (3, 3), c3, (2, 2))

    out = splice(A1, B1, C3, axis=0)

    return out
def create_convnet_cifar10_model(num_classes):
    with default_options(activation=relu, pad=True):
        return Sequential([
            For(
                range(2), lambda: [
                    Convolution2D((3, 3), 64),
                    Convolution2D((3, 3), 64),
                    MaxPooling((3, 3), strides=2)
                ]),
            For(range(2), lambda i: [Dense([256, 128][i]),
                                     Dropout(0.5)]),
            Dense(num_classes, activation=None)
        ])
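
Note that this factory returns an un-applied Sequential; it still has to be applied to an input variable to obtain a concrete model function. A sketch (shapes are illustrative):

import cntk as C

features = C.input_variable((3, 32, 32))
model = create_convnet_cifar10_model(num_classes=10)
z = model(features)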
def create_model(feature_dimensions, classes):
    with default_options(activation=relu, init=glorot_uniform()):
        model = Sequential([
            For(
                range(3), lambda i: [
                    Convolution((5, 5), [32, 32, 64][i], pad=True),
                    BatchNormalization(map_rank=1),
                    MaxPooling((3, 3), strides=(2, 2))
                ]),
            Dense(64),
            BatchNormalization(map_rank=1),
            Dense(len(classes), activation=None)
        ])

    return model(feature_dimensions)
Example #23
def create_vgg9_model(input, num_classes):
    with default_options(activation=relu):
        model = Sequential([
            LayerStack(3, lambda i: [
                Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True),
                Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True),
                MaxPooling((3,3), strides=(2,2))
            ]),
            LayerStack(2, lambda : [
                Dense(1024, init=glorot_uniform())
            ]),
            Dense(num_classes, init=glorot_uniform(), activation=None)
        ])

    return model(input)
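
LayerStack is the older CNTK name for what later releases call For; a sketch of the same VGG-9 topology written with For, assuming identical hyperparameters:

from cntk.default_options import default_options
from cntk.initializer import glorot_uniform
from cntk.layers import Sequential, For, Convolution, MaxPooling, Dense
from cntk.ops import relu

def create_vgg9_model_with_for(input, num_classes):
    with default_options(activation=relu):
        model = Sequential([
            For(range(3), lambda i: [
                Convolution((3, 3), [64, 96, 128][i], init=glorot_uniform(), pad=True),
                Convolution((3, 3), [64, 96, 128][i], init=glorot_uniform(), pad=True),
                MaxPooling((3, 3), strides=(2, 2))
            ]),
            For(range(2), lambda: [Dense(1024, init=glorot_uniform())]),
            Dense(num_classes, init=glorot_uniform(), activation=None)
        ])
    return model(input)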
Example #24
def inception_block_2(input, num3x3, num3x3dbl, bnTimeConst):

    # 3x3 Convolution
    branch3x3 = conv_bn_relu_layer(input, num3x3, (3,3), (2,2), True, bnTimeConst)

    # Double 3x3 Convolution
    branch3x3dbl_1 = conv_bn_relu_layer(input, num3x3dbl[0], (1,1), (1,1), True, bnTimeConst)
    branch3x3dbl_2 = conv_bn_relu_layer(branch3x3dbl_1, num3x3dbl[1], (3,3), (1,1), True, bnTimeConst)
    branch3x3dbl   = conv_bn_relu_layer(branch3x3dbl_2, num3x3dbl[2], (3,3), (2,2), True, bnTimeConst)

    # Max Pooling
    branchPool = MaxPooling((3,3), strides=(2,2), pad=True)(input)

    out = splice(branch3x3, branch3x3dbl, branchPool, axis=0)

    return out
def reduction_B(input, b1, b2, c1, c2, d1, d2, d3):
    A1 = MaxPooling(filter_shape=(3, 3), strides=(2, 2), pad=True)(input)

    B1 = conv_bn_relu(input, (1, 1), b1, (1, 1))
    B2 = conv_bn_relu(B1, (3, 3), b2, (2, 2))

    C1 = conv_bn_relu(input, (1, 1), c1, (1, 1))
    C2 = conv_bn_relu(C1, (3, 3), c2, (2, 2))

    D1 = conv_bn_relu(input, (1, 1), d1, (1, 1))
    D2 = conv_bn_relu(D1, (3, 3), d2, (1, 1))
    D3 = conv_bn_relu(D2, (3, 3), d3, (2, 2))

    out = splice(A1, B2, C2, D3, axis=0)

    return out
Example #26
def inception_block_pass_through(input, num1x1, num3x3r, num3x3, num3x3dblr, num3x3dbl, numPool, bnTimeConst):
    
    # 3x3 Convolution
    branch3x3_reduce = conv_bn_relu_layer(input, num3x3r, (1,1), (1,1), True, bnTimeConst)
    branch3x3 = conv_bn_relu_layer(branch3x3_reduce, num3x3, (3,3), (2,2), True, bnTimeConst)

    # Double 3x3 Convolution
    branch3x3dbl_reduce = conv_bn_relu_layer(input, num3x3dblr, (1,1), (1,1), True, bnTimeConst)
    branch3x3dbl_conv = conv_bn_relu_layer(branch3x3dbl_reduce, num3x3dbl, (3,3), (1,1), True, bnTimeConst)
    branch3x3dbl = conv_bn_relu_layer(branch3x3dbl_conv, num3x3dbl, (3,3), (2,2), True, bnTimeConst)

    # Max Pooling
    branchPool = MaxPooling((3,3), strides=(2,2), pad=True)(input)

    out = splice(branch3x3, branch3x3dbl, branchPool, axis=0)

    return out
Example #27
def inception_block_4(input, num3x3, num7x7_3x3, bnTimeConst):

    # 3x3 Convolution
    branch3x3_1 = conv_bn_relu_layer(input, num3x3[0], (1,1), (1,1), True, bnTimeConst)
    branch3x3   = conv_bn_relu_layer(branch3x3_1, num3x3[1], (3,3), (2,2), False, bnTimeConst)

    # 7x7 3x3 Convolution
    branch7x7_3x3_1 = conv_bn_relu_layer(input, num7x7_3x3[0], (1,1), (1,1), True, bnTimeConst)
    branch7x7_3x3_2 = conv_bn_relu_layer(branch7x7_3x3_1, num7x7_3x3[1], (1,7), (1,1), True, bnTimeConst)
    branch7x7_3x3_3 = conv_bn_relu_layer(branch7x7_3x3_2, num7x7_3x3[2], (7,1), (1,1), True, bnTimeConst)
    branch7x7_3x3   = conv_bn_relu_layer(branch7x7_3x3_3, num7x7_3x3[3], (3,3), (2,2), False, bnTimeConst)

    # Max Pooling
    branchPool = MaxPooling((3,3), strides=(2,2), pad=False)(input)

    out = splice(branch3x3, branch7x7_3x3, branchPool, axis=0)

    return out
Example #28
def create_advanced_model(input, out_dims):
    
    with default_options(activation=relu):
        model = Sequential([
            For(range(2), lambda i: [  # lambda with one parameter
                Convolution((3,3), [32,64][i], pad=True),  # depth depends on i
                Convolution((5,5), [32,64][i], pad=True),
                Convolution((9,9), [32,64][i], pad=True),            
                MaxPooling((3,3), strides=(2,2))
            ]),
            For(range(2), lambda : [   # lambda without parameter
                Dense(512),
                Dropout(0.5)
            ]),
            Dense(out_dims, activation=None)
        ])
    output_layer=model(input)
    
    return output_layer
Example #29
def test_depth_first_search_blocks(depth, prefix_count):
    from cntk.layers import Sequential, Convolution, MaxPooling, Dense
    from cntk.default_options import default_options

    with default_options(activation=relu):
        image_to_vec = Sequential([
            Convolution((5, 5), 32, pad=True),
            MaxPooling((3, 3), strides=(2, 2)),
            Dense(10, activation=None)
        ])

    in1 = input(shape=(3, 256, 256), name='image')
    img = image_to_vec(in1)

    found = depth_first_search(img, lambda x: True, depth=depth)
    found_str = [str(v) for v in found]

    assert len(found) == sum(prefix_count.values())
    for prefix, count in prefix_count.items():
        assert sum(f.startswith(prefix) for f in found_str) == count
Example #30
def create_resnet34(input, num_classes):
    c_map = [64, 128, 256, 512]
    num_layers = [3, 3, 5, 2]

    conv = conv_bn_relu(input, (7, 7), c_map[0], (2, 2))
    maxPool = MaxPooling((3, 3), (2, 2), pad=True)(conv)
    r1 = resnet_basic_stack(maxPool, num_layers[0], c_map[0])

    r2_1 = resnet_basic_inc(r1, c_map[1])
    r2_2 = resnet_basic_stack(r2_1, num_layers[1], c_map[1])

    r3_1 = resnet_basic_inc(r2_2, c_map[2])
    r3_2 = resnet_basic_stack(r3_1, num_layers[2], c_map[2])

    r4_1 = resnet_basic_inc(r3_2, c_map[3])
    r4_2 = resnet_basic_stack(r4_1, num_layers[3], c_map[3])

    # Global average pooling and output
    pool = AveragePooling(filter_shape=(7, 7))(r4_2)
    z = Dense(num_classes)(pool)
    return z
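
Illustrative wiring for create_resnet34 (ImageNet-sized input assumed). Counting layers under the assumption that resnet_basic_inc contributes one two-conv basic block and resnet_basic_stack contributes num_layers such blocks, the stages hold 3 + 4 + 6 + 3 blocks, i.e. 32 convolutions, which together with conv1 and the final Dense gives the 34 weighted layers of ResNet-34:

import numpy as np
import cntk as C

features = C.input_variable((3, 224, 224), np.float32)
labels = C.input_variable(1000, np.float32)

z = create_resnet34(features, 1000)
loss = C.cross_entropy_with_softmax(z, labels)
metric = C.classification_error(z, labels)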