def customCNN(input_shape: Tuple[int, ...],
              output_shape: Tuple[int, ...]) -> Model:
    """Creates a custom CNN model.

    Args:
        input_shape: shape of the input tensor
        output_shape: shape of the output tensor; output_shape[0] is the number of classes

    Returns:
        The custom CNN Model
    """
    num_classes = output_shape[0]
    model = Sequential()
    if len(input_shape) < 3:
        model.add(
            Lambda(lambda x: K.expand_dims(x, -1), input_shape=input_shape))
        input_shape = (input_shape[0], input_shape[1], 1)
    #Input (28, 28, 1)  -> Output (13, 13, 32)
    model.add(Conv2D(32, (3, 3)))
    model.add(ReLU())
    model.add(BatchNormalization())
    model.add(MaxPooling2D())
    #Input (13, 13, 32)  -> Output (11, 11, 64)
    model.add(Conv2D(64, (3, 3)))
    model.add(ReLU())
    model.add(BatchNormalization())
    #Input (11, 11, 64)  -> Output (4, 4, 64)
    model.add(Conv2D(64, (3, 3)))
    model.add(ReLU())
    model.add(MaxPooling2D())
    #Input (4, 4, 64)  -> Output (1024,)
    model.add(Flatten())
    model.add(BatchNormalization())
    #Input (1024,)  -> Output (128,)
    model.add(Dense(128))
    model.add(ReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    #Input (128,)  -> Output (num_classes,)
    model.add(Dense(num_classes, activation='softmax'))
    model.summary()

    return model
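A brief usage sketch (not part of the original source), assuming the standard tensorflow.keras imports used by the function above; the MNIST-style shapes are illustrative only.

# Hypothetical example: 28x28 grayscale input, 10 classes.
# The 2-D input_shape triggers the Lambda branch that expands it to (28, 28, 1).
model = customCNN(input_shape=(28, 28), output_shape=(10,))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, epochs=5)  # x_train: (N, 28, 28), y_train: (N,)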
Example #2
def __conv2d_block(_inputs, filters, kernel, strides, is_use_bias=False, padding='same', activation='RE', name=None):
    x = Conv2D(filters, kernel, strides=strides, padding=padding, use_bias=is_use_bias)(_inputs)
    x = BatchNormalization()(x)
    if activation == 'RE':
        x = ReLU(name=name)(x)
    elif activation == 'HS':
        x = Activation(Hswish, name=name)(x)
    else:
        raise NotImplementedError
    return x
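The 'HS' branch above refers to an Hswish callable defined elsewhere in the original module; a minimal sketch of the usual hard-swish activation, assuming the Keras backend is imported as K, might look like this.

def Hswish(x):
    # hard-swish (MobileNetV3 style): x * ReLU6(x + 3) / 6
    return x * K.relu(x + 3.0, max_value=6.0) / 6.0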
Example #3
def upsampleBlock(stage1, units):
    stage1 = UpSampling2D(size=(2, 2))(stage1)
    stage1 = Conv2D(units,
                    kernel_size=3,
                    padding="same",
                    strides=1,
                    use_bias=False)(stage1)
    stage1 = BatchNormalization()(stage1)
    stage1 = ReLU()(stage1)
    return stage1
Example #4
def build_embedding_compressor_model():
    """
    Build embedding compressor model
    """
    input_layer = Input(shape=(1024, ))
    x = Dense(128)(input_layer)
    x = ReLU()(x)

    model = Model(inputs=[input_layer], outputs=[x])
    return model
Example #5
def conv_layer(n_filters, filter_size, conv):
    # conv_per_layer, is_batch_norm and is_leaky_relu are defined outside this function
    for _ in range(conv_per_layer):
        conv = Conv2D(n_filters, filter_size, padding='same')(conv)
        if is_batch_norm:
            conv = BatchNormalization()(conv)
        if is_leaky_relu:
            conv = LeakyReLU()(conv)
        else:
            conv = ReLU()(conv)
    return conv
Example #6
def Conv_BN(x, n_filters, kernel_size, strides, activation=None):
    x = Conv2D(n_filters,
               kernel_size,
               strides=strides,
               padding='same',
               use_bias=False)(x)
    x = BatchNormalization()(x)
    if activation:
        x = ReLU()(x)
    return x
def Conv_BN(inputs, filters, kernel_size, strides=1, dilation_rate=1):
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides,
               dilation_rate=dilation_rate,
               padding='same')(inputs)
    x = BatchNormalization(epsilon=1e-3, momentum=0.993)(x)
    x = ReLU()(x)

    return x
    def create_net(self, input_shape):
        anchor_input = Input(shape=input_shape)
        pos_input = Input(shape=input_shape)
        neg_input = Input(shape=input_shape)

        dropout_rate = 0.5

        triplet_model_branch_sequence = [
            Conv2D(32, (3, 3), padding='same'),
            Dropout(rate=dropout_rate),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Conv2D(64, (3, 3), padding='same'),
            Dropout(rate=dropout_rate),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Conv2D(128, (3, 3), padding='same'),
            Dropout(rate=dropout_rate),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Flatten(),
            Dense(64, activation='sigmoid')
        ]

        branch = Sequential(triplet_model_branch_sequence)

        anchor_embedding = branch(anchor_input)
        pos_embedding = branch(pos_input)
        neg_embedding = branch(neg_input)

        input = [anchor_input, pos_input, neg_input]

        # Keep the anchor branch as embedding model for predictions
        self.embedding = Model(anchor_input, anchor_embedding)
        # Concatenate the three embeddings into a single output tensor
        output = concatenate([anchor_embedding, pos_embedding, neg_embedding],
                             axis=-1)

        self.model = Model(input, output)
        return
Example #9
def get_model(input_shape):
    kernel_size = 5
    model = Sequential([
        InputLayer(input_shape=input_shape),
        Conv2D(32, kernel_size),
        BatchNormalization(),
        ReLU(),
        MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
        Conv2D(64, kernel_size),
        BatchNormalization(),
        ReLU(),
        MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
        Conv2D(512, kernel_size),
        BatchNormalization(),
        ReLU(),
        GlobalAveragePooling2D(),
        Dense(5, activation='softmax'),
    ])
    return model
Example #10
def Conv_BN(x, n_filters, kernel_size, strides, group=1, activation=None):
    x = GroupConv2D(n_filters,
                    kernel_size,
                    strides=strides,
                    padding='same',
                    groups=group)(x)
    x = BatchNormalization()(x)
    if activation:
        x = ReLU()(x)
    return x
Example #11
    def _inverted_res_block(self, n_out, net, expansion, stride, alpha,
                            block_id):
        snet = net
        n_in = net.get_shape().as_list()[-1]
        prefix = 'features.' + str(block_id) + '.conv.'
        pointwise_conv_filters = int(n_out * alpha)
        pointwise_filters = self._make_divisible(pointwise_conv_filters, 8)
        # Expand

        net = Conv2D(expansion * n_in,
                     kernel_size=1,
                     padding='same',
                     use_bias=False,
                     activation=None,
                     name='mobl%d_conv_expand' % block_id)(net)
        net = BatchNormalization(epsilon=1e-3, momentum=0.999)(net)
        net = ReLU(max_value=6.0)(net)

        # Depthwise
        net = DepthwiseConv2D(kernel_size=3,
                              strides=stride,
                              activation=None,
                              use_bias=False,
                              padding='same',
                              name='mobl%d_conv_depthwise' % block_id)(net)
        net = BatchNormalization(epsilon=1e-3, momentum=0.999)(net)

        net = ReLU(max_value=6.0)(net)

        # Project
        net = Conv2D(pointwise_filters,
                     kernel_size=1,
                     padding='same',
                     use_bias=False,
                     activation=None,
                     name='mobl%d_conv_project' % block_id)(net)
        net = BatchNormalization(epsilon=1e-3, momentum=0.999)(net)

        if n_in == pointwise_filters and stride == 1:
            return Add(name='res_connect_' + str(block_id))([snet, net])

        return net
Example #12
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs._keras_shape[-1]
    pointwise_conv_filters = int(filters * alpha)
    # pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    pointwise_filters = pointwise_conv_filters
    x = inputs
    prefix = 'expanded_conv_{}/'.format(block_id)
    if block_id:
        # Expand

        x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
                   use_bias=False, activation=None,
                   name=prefix + 'expand')(x)
        x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                               name=prefix + 'expand/BatchNorm')(x)
        # x = Activation(relu6, name=prefix + 'expand_relu')(x)
        x = ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        prefix = 'expanded_conv/'
    # Depthwise
    x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                        use_bias=False, padding='same', dilation_rate=(rate, rate),
                        name=prefix + 'depthwise')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'depthwise/BatchNorm')(x)

    # x = Activation(relu6, name=prefix + 'depthwise_relu')(x)
    x = ReLU(6., name=prefix + 'depthwise_relu')(x)
    # Project
    x = Conv2D(pointwise_filters,
               kernel_size=1, padding='same', use_bias=False, activation=None,
               name=prefix + 'project')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999,
                           name=prefix + 'project/BatchNorm')(x)

    if skip_connection:
        return Add(name=prefix + 'add')([inputs, x])

    # if in_channels == pointwise_filters and stride == 1:
    #    return Add(name='res_connect_' + str(block_id))([inputs, x])

    return x
Example #13
def depthwise_conv_block(inputs,
                         pointwise_conv_filters,
                         width_multiplier,
                         depth_multiplier=1,
                         strides=(1, 1),
                         block_id=1):
    """
    depth_multiplier: The number of depthwise convolution output channels
            for each input channel.
    The total number of depthwise convolution output
    channels will be equal to `filters_in * depth_multiplier`.
    """

    #Update the Number of Output Filters
    pointwise_conv_filters = int(pointwise_conv_filters * width_multiplier)

    if strides == (1, 1):
        x = inputs
    else:
        x = ZeroPadding2D(padding=((0, 1), (0, 1)),
                          name='conv_pad_%d' % block_id)(inputs)

    # Depth Wise Convolution
    x = DepthwiseConv2D((3, 3),
                        padding='same' if strides == (1, 1) else 'valid',
                        depth_multiplier=depth_multiplier,
                        strides=strides,
                        use_bias=False,
                        name='conv_dw_%d' % block_id)(x)
    x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
    x = ReLU(6., name='conv_dw_%d_relu' % block_id)(x)

    # PointWise Convolution with 1X1 Filters, No of Filters = pointwise_conv_filters
    x = Conv2D(pointwise_conv_filters, (1, 1),
               padding='same',
               use_bias=False,
               strides=(1, 1),
               name='conv_pw_%d' % block_id)(x)
    x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
    x = ReLU(6., name='conv_pw_%d_relu' % block_id)(x)

    return x
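A short usage sketch (illustrative only, not from the original source) that exercises the channel arithmetic described in the docstring, assuming the tensorflow.keras layers imported above:

# Hypothetical input: 56x56 feature map with 32 channels.
inputs = Input(shape=(56, 56, 32))
x = depthwise_conv_block(inputs, pointwise_conv_filters=64,
                         width_multiplier=0.5, depth_multiplier=1,
                         strides=(2, 2), block_id=1)
# Depthwise stage: 32 * depth_multiplier = 32 channels;
# pointwise stage: int(64 * 0.5) = 32 output channels.
print(x.shape)  # (None, 28, 28, 32)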
Example #14
    def __init__(self, opts):
        self.opts = opts
        (self.train_gen, self.val_gen, self.train_paths, self.train_labels,
         self.test_paths, self.test_labels) = get_train_test_generator(self.opts)

        init = Input(self.opts['SHAPE'])
        x = BatchNormalization(axis=-1)(init)
        x = Conv2D(32, (3, 3), strides=(2, 2))(x)
        x = ReLU()(x)

        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        feature_map_1 = Dropout(self.opts['drop_ratio'])(x)

        x = BatchNormalization(axis=-1)(feature_map_1)
        x = Conv2D(64, (3, 3), strides=(2, 2))(x)
        x = ReLU()(x)

        x = BatchNormalization(axis=-1)(x)
        x = Conv2D(64, (3, 3))(x)
        x = ReLU()(x)
        x = BatchNormalization(axis=-1)(x)
        x = Conv2D(64, (3, 3))(x)
        x = ReLU()(x)

        x = BatchNormalization(axis=-1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        feature_map_2 = Dropout(self.opts['drop_ratio'])(x)

        x = BatchNormalization(axis=-1)(feature_map_2)
        x = Conv2D(128, (3, 3))(x)
        x = ReLU()(x)
        x = BatchNormalization(axis=-1)(x)
        x = Conv2D(128, (3, 3))(x)
        x = ReLU()(x)
        x = BatchNormalization(axis=-1)(x)
        x = Conv2D(128, (3, 3))(x)
        x = ReLU()(x)
        feature_map_3 = Dropout(self.opts['drop_ratio'])(x)

        gap1 = GlobalAveragePooling2D()(feature_map_1)
        gap2 = GlobalAveragePooling2D()(feature_map_2)
        gap3 = GlobalAveragePooling2D()(feature_map_3)

        x = Concatenate()([gap1, gap2, gap3])
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)

        x = Dense(256, activation='relu')(x)
        x = Dropout(0.1)(x)
        x = Dense(28)(x)
        x = Activation('sigmoid')(x)

        self.model = Model(init, x)
        self.model.compile(loss='binary_crossentropy',
                           optimizer=Adam(lr=1e-4),
                           metrics=['accuracy', f1])
Example #15
    def __init__(self,
                 input_shape,
                 base_model='resnet152',
                 num_classes=20,
                 num_bilstms=1,
                 hidden_size=500,
                 include_relu=False,
                 freeze_base_net=True,
                 grid7=False,
                 weights=None):

        super().__init__(input_shape, base_model, num_classes=num_classes)

        self.base_model, added_layers = BaseNetE2E(
            base_model, input_shape, conv_output=True,
            conv7_output=grid7).get_model()

        freeze_len = len(self.base_model.layers) - added_layers

        input1 = Dense(hidden_size)(self.base_model.layers[-1].output)
        input1 = Conv2LSTM()(input1)
        c = []
        for i in range(2):
            bilstm = LSTMInputSequence(i)(input1)
            bilstm = TimeDistributed(ReLU())(bilstm)
            bilstm = TimeDistributed(Dense(hidden_size))(bilstm)
            bilstm = TimeDistributed(ReLU())(bilstm)
            for j in range(num_bilstms):
                lstm_size = hidden_size * np.power(2, j)
                bilstm = Bidirectional(LSTM(lstm_size,
                                            return_sequences=True))(bilstm)
            c.append(bilstm)
        model1 = Concatenate()(c)
        model1 = ReLU()(model1)
        model1 = Dense(self.num_classes)(model1)
        if include_relu != 0:
            model1 = TimeDistributed(ReLU())(model1)
        self.model = Model(inputs=self.base_model.input, outputs=model1)

        if freeze_base_net:
            for l in self.model.layers[:freeze_len]:
                l.trainable = False
Example #16
def unet(pretrained_weights=None, input_size=input_size):
    inputs = Input(input_size)
    strides1 = (1, 1, 1)
    strides2 = (1, 1, 2)
    conv00 = Conv3D(64, size0, strides=stride0, dim_ordering='tf', activation=None, padding='valid', kernel_initializer='he_normal')(inputs)
    conv00 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv00)
    conv00 = ReLU()(conv00)
    conv1 = Conv3D(64, (3, 3, 3), strides=(1, 1, 2), dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv00)
    conv1 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv1)
    conv1 = ReLU()(conv1)
    conv2 = Conv3D(128, (3, 3, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv1)
    conv2 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv2)
    conv2 = ReLU()(conv2)
    conv3 = Conv3D(256, (3, 3, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv2)
    conv3 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv3)
    conv3 = ReLU()(conv3)
    conv4 = Conv3D(filter1_4, (3, 3, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv3)
    conv4 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv4)
    conv4 = ReLU()(conv4)
    conv5 = Conv3D(64, (1, 1, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv00)
    conv5 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv5)
    conv5 = ReLU()(conv5)
    conv6 = Conv3D(128, (1, 1, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv5)
    conv6 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv6)
    conv6 = ReLU()(conv6)
    conv7 = Conv3D(256, (1, 1, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv6)
    conv7 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv7)
    conv7 = ReLU()(conv7)
    conv8 = Conv3D(filter3_4, (1, 1, 3), strides=strides2, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv7)
    conv8 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv8)
    conv8 = ReLU()(conv8)
    conv9 = Conv3D(filter3_5, (1, 1, 3), strides=strides1, dim_ordering='tf', activation=None, padding='same', kernel_initializer='he_normal')(conv8)
    conv9 = BatchNormalization(axis=3, momentum=0.99, epsilon=1e-06, center=True, scale=True)(conv9)
    conv9 = ReLU()(conv9)
    conv9 = concatenate([conv4, conv9], axis=4)
    conv10 = Conv3D(classnum, (1, 1, 1), strides=strides1, activation='softmax',
                    padding='same', kernel_initializer='he_normal', name='conv_')(conv9)
    model = Model(input=inputs, output=conv10)
    model.summary()
    if pretrained_weights:
        model.load_weights(pretrained_weights)
    return model
def res_block(inpt, n_filters, strides, prob, training):
    if training:
        # residual
        x = inpt
        x = Conv_BN(x, n_filters // 4, 1, strides=strides, activation='relu')
        x = Conv_BN(x, n_filters // 4, 3, strides=1, activation='relu')
        residual = Conv_BN(x, n_filters, 1, strides=1, activation=None)
        # shortcut
        if strides != 1 or K.int_shape(inpt)[-1] != n_filters:
            skip = Conv_BN(inpt,
                           n_filters,
                           1,
                           strides=strides,
                           activation=None)
        else:
            skip = inpt
        active = RandomBinomial(prob)(skip)  # [0,1] switch for training
        active = tf.Print(active, [active], message="   active  ")
        x = Lambda(lambda x: tf.cond(x[2] > 0, lambda: ReLU()
                                     (add([x[0], x[1]])), lambda: x[1]))(
                                         [residual, skip, active])
        return x

    else:
        # residual
        x = inpt
        x = Conv_BN(x, n_filters // 4, 1, strides=strides, activation='relu')
        x = Conv_BN(x, n_filters // 4, 3, strides=1, activation='relu')
        x = Conv_BN(x, n_filters, 1, strides=1, activation=None)
        residual = Lambda(lambda x: prob * x)(x)
        # shortcut
        if strides != 1 or inpt._keras_shape[-1] != n_filters:
            skip = Conv_BN(inpt,
                           n_filters,
                           1,
                           strides=strides,
                           activation=None)
        else:
            skip = inpt
        x = add([residual, skip])
        x = ReLU()(x)
        return x
Example #18
    def get_generator(self, _input):

        # the _input argument is ignored; a fresh latent Input is created below
        _input = Input(shape=(self.z_shape, ))
        x = Dense(2 * 2 * 512, kernel_initializer=self.init)(_input)
        x = ReLU()(x)
        x = Reshape((2, 2, 512))(x)
        x = BatchNormalization()(x)
        x = Conv2DTranspose(filters=256,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            padding='same',
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2DTranspose(filters=128,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            padding='same',
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2DTranspose(filters=64,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            padding='same',
                            use_bias=False)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2DTranspose(filters=3,
                            kernel_size=(5, 5),
                            strides=(2, 2),
                            activation='tanh',
                            padding='same',
                            use_bias=False)(x)
        #x = Conv2DTranspose(filters = 3, kernel_size = (5,5), strides = (2,2), padding = 'same', use_bias = False)(x)
        #x = BatchNormalization()(x)
        #x = BatchNormalization()(x)
        #x = ReLU()(x)
        #x = Conv2D(filters = 3, kernel_size = (3,3),padding = 'same', activation = 'tanh')(x)

        model = Model(_input, x)
        return model
    def create_net(self, input_shape):
        left_input = Input(shape=input_shape)
        right_input = Input(shape=input_shape)

        siamese_model_branch_sequence = [
            Conv2D(32, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Conv2D(64, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Conv2D(128, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            Conv2D(32, (3, 3), padding='same'),
            BatchNormalization(),
            ReLU(),
            MaxPooling2D((2, 2), padding='same'),
            Flatten(),
            Dense(128, activation='sigmoid')
        ]

        branch = Sequential(siamese_model_branch_sequence)

        left_embedding = branch(left_input)
        right_embedding = branch(right_input)

        # Keep the left branch as embedding model for predictions
        self.embedding = Model(left_input, left_embedding)
        # Lambda layer computing the element-wise absolute difference between the two embeddings
        distance_euclid = Lambda(
            lambda tensors: K.abs(tensors[0] - tensors[1]))(
                [left_embedding, right_embedding])
        similarity_output = Dense(1, activation='sigmoid')(distance_euclid)

        self.model = Model([left_input, right_input], similarity_output)
        return
Example #20
    def build_generator(self):
        """Generator network."""
        # Input tensors
        inp_c = Input(shape=(self.c_dim, ))
        inp_img = Input(shape=(self.image_size, self.image_size, 3))

        # Replicate spatially and concatenate domain information
        c = Lambda(lambda x: K.repeat(x, self.image_size**2))(inp_c)
        c = Reshape((self.image_size, self.image_size, self.c_dim))(c)
        x = Concatenate()([inp_img, c])

        # First Conv2D
        x = Conv2D(filters=self.g_conv_dim, kernel_size=7, strides=1, padding='same', use_bias=False)(x)
        x = InstanceNormalization(axis=-1)(x)
        x = ReLU()(x)

        # Down-sampling layers
        curr_dim = self.g_conv_dim
        for i in range(2):
            x = ZeroPadding2D(padding=1)(x)
            x = Conv2D(filters=curr_dim * 2, kernel_size=4, strides=2, padding='valid', use_bias=False)(x)
            x = InstanceNormalization(axis=-1)(x)
            x = ReLU()(x)
            curr_dim = curr_dim * 2

        # Bottleneck layers
        for i in range(self.g_repeat_num):
            x = self.ResidualBlock(x, curr_dim)

        # Up-sampling layers
        for i in range(2):
            x = UpSampling2D(size=2)(x)
            x = Conv2D(filters=curr_dim // 2, kernel_size=4, strides=1, padding='same', use_bias=False)(x)
            x = InstanceNormalization(axis=-1)(x)
            x = ReLU()(x)
            curr_dim = curr_dim // 2

        # Last Conv2D
        x = ZeroPadding2D(padding=3)(x)
        out = Conv2D(filters=3, kernel_size=7, strides=1, padding='valid', activation='tanh', use_bias=False)(x)

        return Model(inputs=[inp_img, inp_c], outputs=out)
Example #21
def ConvBNReluUnit(input, kernel_size=8, index=0):
    x = Conv1D(filters=64,
               kernel_size=kernel_size,
               padding='same',
               kernel_initializer='glorot_uniform',
               name='conv{}'.format(index + 1))(input)
    x = BatchNormalization(name='conv{}-bn'.format(index + 1))(x)
    x = ReLU(name='conv{}-relu'.format(index + 1))(x)
    x = MaxPool1D(pool_size=2, strides=2,
                  name='maxpool{}'.format(index + 1))(x)
    return x
def predict_block(inputs, out_channel, sym, id):
    name = 'ssd_' + sym + '{}'.format(id)
    x = DepthwiseConv2D(kernel_size=3, strides=1,
                           activation=None, use_bias=False, padding='same', name=name + '_dw_conv')(inputs)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=name + '_dw_bn')(x)
    x = ReLU(6., name=name + '_dw_relu')(x)

    x = Conv2D(out_channel, kernel_size=1, padding='same', use_bias=False,
                  activation=None, name=name + 'conv2')(x)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=name + 'conv2_bn')(x)
    return x
Example #23
def conv_block(x, filters, kernel, stride, layer_num, pooling=False):
    x = Conv2D(filters, (kernel, kernel),
               strides=(stride, stride),
               padding='same',
               name='conv_' + str(layer_num),
               use_bias=False)(x)
    x = BatchNormalization(name='norm_' + str(layer_num))(x)
    x = ReLU()(x)
    if pooling: x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.2)(x)
    return x
Example #24
def make_model(feature_shape):
    input_layer = Input(shape=(32, ) + feature_shape)

    conv_1 = Conv1D(128, 8)(input_layer)
    batch_norm_1 = BatchNormalization()(conv_1)
    block_1 = ReLU()(batch_norm_1)

    conv_2 = Conv1D(256, 5)(block_1)
    batch_norm_2 = BatchNormalization()(conv_2)
    block_2 = ReLU()(batch_norm_2)

    pool_1 = AveragePooling1D()(block_2)
    flatten_1 = Flatten()(pool_1)
    dense_1 = Dense(NUM_CLASSES)(flatten_1)
    out = Softmax()(dense_1)

    model = Model(inputs=input_layer, outputs=out)
    model.compile("adam", loss="categorical_crossentropy", metrics=["acc"])

    return model
def first_type_layer(input_value, size):
    convolution_layer = Conv2D(size,
                               kernel_size=(3, 3),
                               padding='same',
                               kernel_initializer='random_normal',
                               use_bias=True,
                               bias_initializer=conv_init,
                               data_format="channels_first")(input_value)
    batch_normalized_layer = BatchNormalization()(convolution_layer)
    activation_layer = ReLU()(batch_normalized_layer)
    return activation_layer
def resNet64(x):
    x = Conv2D(64, (1, 1),
               padding='same',
               activation='relu',
               kernel_initializer=he_normal)(x)
    shortcut = x
    x = Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv2D(64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding='same')(x)
    x = BatchNormalization()(x)
    x = add([shortcut, x])
    x = ReLU()(x)
    return x
Example #27
def deepspeech_custom(is_gpu: bool,
                      layers: List[dict],
                      input_dim: int,
                      to_freeze: List[dict] = [],
                      random_state=1) -> Model:
    np.random.seed(random_state)
    set_random_seed(random_state)

    constructors = {
        'BatchNormalization':
        lambda params: BatchNormalization(**params),
        'Conv2D':
        lambda params: Conv2D(**params, name=name),
        'Dense':
        lambda params: TimeDistributed(Dense(**params), name=name),
        'Dropout':
        lambda params: Dropout(**params),
        'LSTM':
        lambda params: Bidirectional(CuDNNLSTM(**params) if is_gpu else LSTM(
            activation='tanh', recurrent_activation='sigmoid', **params),
                                     merge_mode='sum',
                                     name=name),
        'ReLU':
        lambda params: ReLU(**params),
        'ZeroPadding2D':
        lambda params: ZeroPadding2D(**params),
        'expand_dims':
        lambda params: Lambda(expand_dims, arguments=params),
        'squeeze':
        lambda params: Lambda(squeeze, arguments=params),
        'squeeze_last_dims':
        lambda params: Reshape([-1, params['units']])
    }
    with tf.device('/cpu:0'):
        input_tensor = Input([None, input_dim], name='X')

        x = input_tensor
        for params in layers:
            constructor_name = params.pop('constructor')
            # `name` is implicitly passed to the constructors
            # (Conv2D, TimeDistributed and Bidirectional).
            name = params.pop('name') if 'name' in params else None
            constructor = constructors[constructor_name]
            layer = constructor(params)
            x = layer(x)
        output_tensor = x

        model = Model(input_tensor, output_tensor, name='DeepSpeech')
        for params in to_freeze:
            name = params.pop('name')
            layer = model.get_layer(name)
            layer.trainable = False
    return model
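For reference, the `layers` argument is a list of dicts, each naming a key from the `constructors` table plus keyword arguments; the configuration below is a hypothetical illustration of that structure, not the project's actual DeepSpeech definition.

# Illustrative config only; every value here is an assumption.
example_layers = [
    {'constructor': 'expand_dims', 'axis': -1},  # assumes the expand_dims helper takes an `axis` kwarg
    {'constructor': 'Conv2D', 'name': 'conv_1', 'filters': 32,
     'kernel_size': [11, 41], 'strides': [2, 2], 'padding': 'same'},
    {'constructor': 'ReLU', 'max_value': 20},
    {'constructor': 'squeeze_last_dims', 'units': 32 * 80},
    {'constructor': 'LSTM', 'name': 'lstm_1', 'units': 800, 'return_sequences': True},
    {'constructor': 'Dense', 'name': 'dense_1', 'units': 29},
]
# model = deepspeech_custom(is_gpu=False, layers=example_layers, input_dim=160)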
Example #28
def ConvBN(x,
           filters,
           kernel_size,
           strides=1,
           padding='same',
           activation=None):
    x = Conv3D(filters, kernel_size, strides=strides, padding=padding)(x)
    x = BatchNormalization()(x)
    if activation:
        x = ReLU()(x)
    return x
Example #29
def res_block(x, n_filters, strides):
    inpt = x
    # residual
    x = Conv_BN(x, n_filters, 3, strides=strides, activation='relu')
    x = Conv_BN(x, n_filters, 3, strides=1, activation=None)
    # shortcut
    if strides != 1 or inpt._keras_shape[-1] != n_filters:
        inpt = Conv_BN(inpt, n_filters, 1, strides=strides, activation=None)
    x = add([inpt, x])
    x = ReLU()(x)
    return x
Example #30
def Sep_Conv_BN(x, filters, strides, activation=True):
    x = DepthwiseConv2D(kernel_size=3, strides=1, padding='same')(x)
    x = BatchNormalization()(x)
    x = ReLU()(x)
    x = Conv_BN(x,
                filters,
                kernel_size=1,
                strides=strides,
                activation=True,
                dilation_rate=1)
    return x