Example 1
    def build(self, input_shape):
        self.prep = layers.Lambda(
            lambda img: preprocess_input(tf.cast(img, tf.float32), 'channels_last', 'tf'), name='preprocess')

        # encoder
        self.stage1 = RSU7(32, 64)
        self.pool12 = layers.MaxPool2D(2, padding='same')

        self.stage2 = RSU6(32, 128)
        self.pool23 = layers.MaxPool2D(2, padding='same')

        self.stage3 = RSU5(64, 256)
        self.pool34 = layers.MaxPool2D(2, padding='same')

        self.stage4 = RSU4(128, 512)
        self.pool45 = layers.MaxPool2D(2, padding='same')

        self.stage5 = RSU4F(256, 512)
        self.pool56 = layers.MaxPool2D(2, padding='same')

        self.stage6 = RSU4F(256, 512)

        # decoder
        self.stage5d = RSU4F(256, 512)
        self.stage4d = RSU4(128, 256)
        self.stage3d = RSU5(64, 128)
        self.stage2d = RSU6(32, 64)
        self.stage1d = RSU7(16, 64)

        self.proj1 = HeadProjection(self.classes, kernel_size=3)
        self.proj2 = HeadProjection(self.classes, kernel_size=3)
        self.proj3 = HeadProjection(self.classes, kernel_size=3)
        self.proj4 = HeadProjection(self.classes, kernel_size=3)
        self.proj5 = HeadProjection(self.classes, kernel_size=3)
        self.proj6 = HeadProjection(self.classes, kernel_size=3)
        self.act = HeadActivation(self.classes)
        self.head = ClassificationHead(self.classes)

        super().build(input_shape)
Example 2
def build_model_with_dropout():
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.summary()  # summary() prints directly; wrapping it in print() would only print None

    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.RMSprop(learning_rate=1e-4),  # 'lr' is deprecated
                  metrics=['acc'])
    return model
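
The snippets on this page omit their import lines. A minimal set of assumed imports that makes build_model_with_dropout() run (an assumption; the original context is not shown):

from tensorflow.keras import layers, models, optimizers, regularizers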
Example 3
def train8():
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(20, 32),
        batch_size=90,
        class_mode='categorical',
        shuffle=True)
    validation_generator = validation_datagen.flow_from_directory(
        validation_dir,
        target_size=(20, 32),
        batch_size=90,
        class_mode='categorical',
        shuffle=True)
    test_generator = test_datagen.flow_from_directory(test_dir,
                                                      target_size=(20, 32),
                                                      batch_size=1,
                                                      class_mode='categorical',
                                                      shuffle=True)

    model = models.Sequential()
    model.add(
        layers.Conv2D(256, (3, 3), activation='relu', input_shape=(20, 32, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(256,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001)))
    model.add(
        layers.Dense(1024,
                     activation='relu',
                     kernel_regularizer=regularizers.l2(0.001)))
    model.add(layers.Dense(88, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    history = model.fit(  # fit_generator is deprecated; model.fit accepts generators
        train_generator,
        steps_per_epoch=train_generator.n // train_generator.batch_size,
        epochs=7,
        validation_data=validation_generator,
        validation_steps=validation_generator.n //
        validation_generator.batch_size,
    )

    score = model.evaluate(test_generator, steps=test_generator.n)  # evaluate_generator is deprecated

    model.save("test8_{}.h5".format(score[1]))
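
train_datagen, validation_datagen, and test_datagen are not defined inside train8(). A minimal sketch of the generators it assumes (names match the call sites; the rescaling is an assumption):

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
validation_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)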
Example 4
def build_model():
    layer_one_input = keras.Input(shape=(width, height, depth))
    l_0 = layers.AveragePooling2D()(layer_one_input)
    l_1 = layers.Conv2D(96, (11, 11))(l_0)
    l_2 = layers.MaxPool2D()(l_1)
    l_3 = layers.Conv2D(96, (5, 5))(l_2)
    l_4 = layers.MaxPool2D()(l_3)
    l_5 = layers.Conv2D(96, (3, 3))(l_4)
    p = layers.MaxPool2D()(l_5)

    l_6 = layers.Conv2D(96, (3, 3))(p)
    l_7 = layers.MaxPool2D()(l_6)

    l_8 = layers.Reshape(target_shape=(-1, ))(l_7)

    l_9 = layers.Dense(256)(l_8)
    l_10 = layers.Dense(4800, activation='sigmoid')(l_9)  # sigmoid keeps outputs in [0, 1] for the binary cross-entropy below

    l_11 = layers.Reshape(target_shape=(80, 60))(l_10)

    model = keras.models.Model(inputs=[layer_one_input], outputs=l_11)
    model.compile(optimizer=optimizer, loss='binary_crossentropy')
    return model
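
width, height, depth, and optimizer are globals assumed by build_model(). Illustrative values only; the originals are not shown:

width, height, depth = 480, 640, 3   # hypothetical input size
optimizer = keras.optimizers.Adam()  # any Keras optimizer works here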
Example 5
    def __init__(self):
        super().__init__()
        self.conv = tf.keras.Sequential(name='conv')
        # for i in [64,128,256,512,512]:
        for i in [64, 128]:
            self.conv.add(layers.Conv2D(i, 3, strides=(1, 1), padding='same', activation='relu'))
            self.conv.add(layers.MaxPool2D(pool_size=(2, 2)))

        self.linear = tf.keras.Sequential(name='linear')
        self.linear.add(layers.Flatten())
        # for i in range(2):
        #     self.linear.add(layers.Dense(4096,activation='relu'))
        #     self.linear.add(layers.Dropout(0.2))
        self.linear.add(layers.Dense(5))
Example 6
    def get_keras_model(self, verbose=False):
        model = Sequential()
        model.add(layers.InputLayer(input_shape=self.input_shape))

        conv_counter = 0
        for conv_layer in self.conv_layers:
            conv_counter += 1
            kernel_initializer = conv_layer.get("kernel_initializer",
                                                "glorot_uniform")

            model.add(
                layers.Conv2D(conv_layer["filters"],
                              conv_layer["kernel_size"],
                              strides=conv_layer["strides"],
                              padding=conv_layer["padding"],
                              activation=conv_layer["activation"],
                              kernel_initializer=kernel_initializer,
                              name=conv_layer_name(conv_counter),
                              data_format=self.data_format))
            model.add(
                layers.MaxPool2D(pool_size=conv_layer["max_pooling"]["size"],
                                 strides=conv_layer["max_pooling"].get(
                                     "strides", None),
                                 data_format=self.data_format))
            if "dropout" in conv_layer:
                model.add(
                    layers.SpatialDropout2D(conv_layer["dropout"],
                                            data_format=self.data_format))  # keep consistent with the conv layers

        model.add(layers.Flatten(data_format=self.data_format))

        dense_counter = 0
        for dense_layer in self.dense_layers:
            dense_counter += 1
            kernel_initializer = dense_layer.get("kernel_initializer",
                                                 "glorot_uniform")

            model.add(
                layers.Dense(dense_layer["units"],
                             activation=dense_layer["activation"],
                             kernel_initializer=kernel_initializer,
                             name=dense_layer_name(dense_counter)))

            if "dropout" in dense_layer:
                model.add(layers.Dropout(dense_layer["dropout"]))

        if verbose:
            model.summary()

        return model
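
The conv_layers and dense_layers fields, and the conv_layer_name/dense_layer_name helpers, are not shown. A sketch inferred from how get_keras_model() reads them (key names come from the code above; the values are illustrative):

def conv_layer_name(i):
    return "conv_{}".format(i)

def dense_layer_name(i):
    return "dense_{}".format(i)

conv_layers = [{
    "filters": 32,
    "kernel_size": (3, 3),
    "strides": (1, 1),
    "padding": "same",
    "activation": "relu",
    "max_pooling": {"size": (2, 2)},  # an optional "strides" key is also read
    "dropout": 0.25,                  # optional; enables SpatialDropout2D
}]
dense_layers = [{"units": 128, "activation": "relu", "dropout": 0.5}]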
Example 7
def inception_module(in_tensor, c1, c3_1, c3, c5_1, c5, pp):
    conv1 = conv1x1(c1)(in_tensor)

    conv3_1 = conv1x1(c3_1)(in_tensor)
    conv3 = conv3x3(c3)(conv3_1)

    conv5_1 = conv1x1(c5_1)(in_tensor)
    conv5 = conv5x5(c5)(conv5_1)

    pool = layers.MaxPool2D(3, strides=1, padding='same')(in_tensor)
    pool = conv1x1(pp)(pool)  # per the Inception paper: max pooling first, then the 1x1 projection

    merged = layers.Concatenate(axis=-1)([conv1, conv3, conv5, pool])
    return merged
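
conv1x1, conv3x3, and conv5x5 are factory helpers not included in this snippet. A plausible sketch consistent with how they are called (the ReLU activation and 'same' padding are assumptions):

def conv1x1(filters):
    return layers.Conv2D(filters, (1, 1), strides=1, padding='same', activation='relu')

def conv3x3(filters):
    return layers.Conv2D(filters, (3, 3), strides=1, padding='same', activation='relu')

def conv5x5(filters):
    return layers.Conv2D(filters, (5, 5), strides=1, padding='same', activation='relu')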
Example 8
def init_model():
    reg = keras.regularizers.l2(0.0)  # zero-strength L2, i.e. regularization effectively disabled

    return keras.Sequential([
        layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same', activation='relu',
                      kernel_regularizer=reg, input_shape=(28, 28, 1)),
        layers.Conv2D(filters=32, kernel_size=(5, 5), padding='same',
                      activation='relu', kernel_regularizer=reg),
        layers.MaxPool2D(pool_size=(2, 2)),
        layers.Dropout(0.25),  # output shape (N, 14, 14, 32)
        layers.BatchNormalization(),
        layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                      activation='relu', kernel_regularizer=reg),
        layers.Conv2D(filters=64, kernel_size=(3, 3), padding='same',
                      activation='relu', kernel_regularizer=reg),
        layers.MaxPool2D(pool_size=(2, 2)),
        layers.Dropout(0.25),  # output shape (N, 7, 7, 64)
        layers.BatchNormalization(),
        layers.Flatten(),  # output shape (N, 7*7*64)
        layers.Dense(256, activation='relu', kernel_regularizer=reg),
        layers.Dropout(0.25),  # output shape (N, 256)
        layers.BatchNormalization(),
        layers.Dense(10, activation='softmax', kernel_regularizer=reg),  # class probabilities
    ])
Example 9
def alexnet_model2(input_dims, nb_labels, activation, optimizer, norm=False):
    model = models.Sequential()

    model.add(layers.Conv2D(92, 7, strides=3, input_shape=input_dims))  # Convolution2D is a legacy alias for Conv2D
    if norm:
        model.add(layers.BatchNormalization())
    model.add(layers.Activation(activation))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

    model.add(layers.Conv2D(256, 5, strides=1))
    if norm:
        model.add(layers.BatchNormalization())
    model.add(layers.Activation(activation))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

    model.add(layers.Conv2D(384, 3, strides=1))

    model.add(layers.Conv2D(384, 3, strides=1))

    model.add(layers.Conv2D(384, 3, strides=1))
    model.add(layers.Activation(activation))
    model.add(
        layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))

    #Fully conected end layers
    model.add(layers.Flatten())
    model.add(layers.Dense(2048, activation=activation))
    model.add(layers.Dense(2048, activation=activation))
    model.add(layers.Dense(nb_labels, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
Example 10
    def build(dim):
        model = models.Sequential()

        # convolution layer with 4 3x3 kernels applied to the input image
        model.add(
            layers.Conv2D(4, (3, 3),
                          activation='relu',
                          input_shape=(dim, dim, 1)))
        model.add(layers.MaxPool2D((2, 2)))  # 2x2 max pooling with a stride of 2

        #second convolution layer with only 2 3x3 kernels
        model.add(layers.Conv2D(2, (3, 3), activation='relu'))
        model.add(layers.MaxPool2D((2, 2)))

        #Remove the first fully connected layer and pass directly to softmax layer
        model.add(layers.Flatten())

        # Output layer with softmax classifier for a multiclass, single-label problem
        model.add(layers.Dense(4, activation='softmax'))
        model.summary()

        num_layers = 4  #parameter used for plotting activation maps in Visualization
        return model, num_layers
Example 11
def build_model():
    model = models.Sequential()
    model.add(
        layers.Conv2D(32,
                      kernel_size=(3, 3),
                      activation='relu',
                      input_shape=(150, 150, 3)))  # (148, 148)
    model.add(layers.MaxPool2D((2, 2)))  # (74, 74)
    model.add(layers.Conv2D(64, kernel_size=(3, 3),
                            activation='relu'))  # 72, 72
    model.add(layers.MaxPool2D((2, 2)))  # 36, 36
    model.add(layers.Conv2D(128, kernel_size=(3, 3),
                            activation='relu'))  # 34, 34
    model.add(layers.MaxPool2D((2, 2)))  # 17, 17
    model.add(layers.Conv2D(128, kernel_size=(3, 3),
                            activation='relu'))  # 15, 15
    model.add(layers.MaxPool2D((2, 2)))  # 7, 7

    model.add(layers.Flatten())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    return model
Example 12
def inception_module(in_tensor, n1, n3_1, n3, n5_1, n5, pp):
    conv1 = conv1x1(n1)(in_tensor)

    conv3_1 = conv1x1(n3_1)(in_tensor)
    conv3 = conv3x3(n3)(conv3_1)

    conv5_1 = conv1x1(n5_1)(in_tensor)
    conv5 = conv5x5(n5)(conv5_1)

    # Per the paper, max pooling comes first, followed by the 1x1 convolution
    pool = layers.MaxPool2D(3, strides=1, padding='same')(in_tensor)
    pool = conv1x1(pp)(pool)

    merged = layers.Concatenate(axis=-1)([conv1, conv3, conv5, pool])
    return merged
Example 13
def getModel(input_shape):
    model = models.Sequential()

    model.add(layers.Conv2D(64, (3, 3), activation='relu', name='B1Conv'
                            , input_shape=input_shape))
    model.add(layers.MaxPool2D((2, 2), name='B1MAx'))

    model.add(layers.Conv2D(64, (3, 3), activation='relu', name='B2Conv'))
    model.add(layers.MaxPool2D((2, 2), name='B2MAx'))

    model.add(layers.Conv2D(64, (3, 3), activation='relu', name='B3Conv'))
    model.add(layers.MaxPool2D((2, 2), name='B3MAx'))

    model.add(layers.Conv2D(128, (3, 3), activation='relu', name='B4Conv'))
    model.add(layers.MaxPool2D((2, 2), name='B4MAx'))

    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(optimizer=opt.RMSprop(),
                  loss=lss.binary_crossentropy,
                  metrics=['acc'])
    return model
Example 14
def build_model():
    print('... construct network')

    inputs = layers.Input((32, 32, 3))
    x = layers.Conv2D(32, (3, 3), activation='relu')(inputs)
    x = layers.Conv2D(32, (3, 3), activation='relu')(x)
    x = layers.MaxPool2D((2, 2))(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(128)(x)
    x = layers.Dropout(0.5)(x)
    out = layers.Dense(1, activation='linear')(x)

    return Model(inputs=inputs, outputs=out)
Example 15
 def _create_Kao_Rnet(self, weight_path='./models/mtcnn/24net.h5'):
     '''
     Build the MTCNN R-Net (24x24 refinement network) and load pretrained weights.
     '''
     input = KL.Input(shape=[24, 24, 3])  # change this shape to [None, None, 3] to enable arbitrary-shape input
     x = KL.Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1', data_format="channels_last")(input)
     x = KL.PReLU(shared_axes=[1, 2], name='prelu1')(x)
     x = KL.MaxPool2D(pool_size=3, strides=2, padding='same', data_format="channels_last")(x)
 
     x = KL.Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2', data_format="channels_last")(x)
     x = KL.PReLU(shared_axes=[1, 2], name='prelu2')(x)
     x = KL.MaxPool2D(pool_size=3, strides=2, data_format="channels_last")(x)
 
     x = KL.Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3', data_format="channels_last")(x)
     x = KL.PReLU(shared_axes=[1, 2], name='prelu3')(x)
     x = KL.Permute((3, 2, 1))(x)
     x = KL.Flatten()(x)
     x = KL.Dense(128, name='conv4')(x)
     x = KL.PReLU(name='prelu4')(x)
     classifier = KL.Dense(2, activation='softmax', name='conv5-1')(x)
     bbox_regress = KL.Dense(4, name='conv5-2')(x)
     model = Model([input], [classifier, bbox_regress])
     model.load_weights(weight_path, by_name=True)
     return model
Example 16
def alexnet(in_shape=(227, 227, 3), n_classes=1000, opt='sgd'):
    in_layer = layers.Input(in_shape)
    conv1 = layers.Conv2D(96, 11, strides=4, activation='relu')(in_layer)
    pool1 = layers.MaxPool2D(3, 2)(conv1)
    conv2 = layers.Conv2D(256, 5, strides=1, padding='same',
                          activation='relu')(pool1)
    pool2 = layers.MaxPool2D(3, 2)(conv2)
    conv3 = layers.Conv2D(384, 3, strides=1, padding='same',
                          activation='relu')(pool2)
    conv4 = layers.Conv2D(256, 3, strides=1, padding='same',
                          activation='relu')(conv3)
    pool3 = layers.MaxPool2D(3, 2)(conv4)
    flattened = layers.Flatten()(pool3)
    dense1 = layers.Dense(4096, activation='relu')(flattened)
    drop1 = layers.Dropout(0.5)(dense1)
    dense2 = layers.Dense(4096, activation='relu')(drop1)
    drop2 = layers.Dropout(0.5)(dense2)
    preds = layers.Dense(n_classes, activation='softmax')(drop2)

    model = Model(in_layer, preds)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    return model
Example 17
def conv_block(X,
               units,
               kernel=(3, 3),
               pad='same',
               pooling=True,
               drop=False,
               Activation=layers.LeakyReLU()):
    X = layers.Conv2D(units, kernel, padding=pad)(X)
    X = layers.BatchNormalization()(X)
    if pooling:
        X = layers.MaxPool2D()(X)
    X = Activation(X)
    if drop:
        X = layers.GaussianDropout(.3)(X)
    return X
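
A hedged usage sketch of conv_block (the input shape and filter counts are illustrative):

inputs = layers.Input((64, 64, 3))
x = conv_block(inputs, 32)                       # Conv2D -> BatchNorm -> MaxPool -> LeakyReLU
x = conv_block(x, 64, pooling=False, drop=True)  # skips pooling, adds GaussianDropout(.3)

Note that the default Activation=layers.LeakyReLU() is evaluated once at function-definition time, so every call that relies on the default shares a single layer instance; passing a fresh activation layer per call avoids reusing the same graph node.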
Example 18
def alexnet_graph(imgs):
    x = KL.Conv2D(192, kernel_size=11, strides=2, name='conv0')(imgs)

    x = KL.BatchNormalization(name='bn0')(x)
    x = KL.Activation('relu', name='relu0')(x)
    x = KL.MaxPool2D(3, strides=2, name='pool0')(x)

    x = KL.Conv2D(512, kernel_size=5, name='conv1')(x)
    x = KL.BatchNormalization(name='bn1')(x)
    x = KL.Activation('relu', name='relu1')(x)
    x = KL.MaxPool2D(3, strides=2, name='pool1')(x)
    
    x = KL.Conv2D(768, kernel_size=3, name='conv2')(x)
    x = KL.BatchNormalization(name='bn2')(x)
    x = KL.Activation('relu', name='relu2')(x)

    x = KL.Conv2D(768, kernel_size=3, name='conv3')(x)
    x = KL.BatchNormalization(name='bn3')(x)
    x = KL.Activation('relu', name='relu3')(x)

    x = KL.Conv2D(512, kernel_size=3, name='conv4')(x)
    x = KL.BatchNormalization(name='bn4')(x)
    
    return [x]
Example 19
def convolution():
    inn = layers.Input(shape=(sequence_length, embedding_dimension, 1))
    cnns = []
    for size in filter_sizes:
        conv = layers.Conv2D(filters=64,
                             kernel_size=(size, embedding_dimension),
                             strides=1,
                             padding='valid',
                             activation='relu')(inn)
        pool = layers.MaxPool2D(pool_size=(sequence_length - size + 1, 1),
                                padding='valid')(conv)
        cnns.append(pool)
    outt = layers.concatenate(cnns)
    model = keras.Model(inputs=inn, outputs=outt)
    return model
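
sequence_length, embedding_dimension, and filter_sizes are module-level globals here. Illustrative values for a TextCNN-style setup (not from the original):

sequence_length = 100        # tokens per input sequence
embedding_dimension = 128    # embedding size; also the kernel width, so each conv spans whole word vectors
filter_sizes = [3, 4, 5]     # n-gram window heights

model = convolution()        # output: one max-pooled 64-channel feature per filter size, concatenated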
Example 20
 def buildWithDropout(width, height, depth, classes):
     '''
     with dropout net
     :param width:
     :param height:
     :param depth:
     :param classes:
     :return: model
     '''
     model = models.Sequential()
     model.add(layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(width, height, depth)))
     model.add(layers.MaxPool2D(pool_size=(2, 2)))
     model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
     model.add(layers.MaxPool2D(pool_size=(2, 2)))
     model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
     model.add(layers.MaxPool2D(pool_size=(2, 2)))
     model.add(layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu'))
     model.add(layers.MaxPool2D(pool_size=(2, 2)))
     model.add(layers.Dropout(0.5))  # note: dropout before Flatten; it is more commonly placed after Flatten
     model.add(layers.Flatten())
     model.add(layers.Dense(units=512, activation='relu'))
     model.add(layers.Dense(units=1, activation='sigmoid'))
     model.summary()
     return model
Example 21
def senet_init_block(x, in_channels, out_channels, name="senet_init_block"):
    """
    SENet specific initial block.

    Parameters
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    name : str, default 'senet_init_block'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulting tensor/variable/symbol.
    """
    mid_channels = out_channels // 2

    x = resnext_conv3x3(x=x,
                        in_channels=in_channels,
                        out_channels=mid_channels,
                        strides=2,
                        groups=1,
                        activate=True,
                        name=name + "/conv1")
    x = resnext_conv3x3(x=x,
                        in_channels=mid_channels,
                        out_channels=mid_channels,
                        strides=1,
                        groups=1,
                        activate=True,
                        name=name + "/conv2")
    x = resnext_conv3x3(x=x,
                        in_channels=mid_channels,
                        out_channels=out_channels,
                        strides=1,
                        groups=1,
                        activate=True,
                        name=name + "/conv3")
    x = nn.MaxPool2D(pool_size=3,
                     strides=2,
                     padding='same',
                     name=name + "/pool")(x)
    return x
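
resnext_conv3x3 is defined elsewhere in that codebase. A simplified sketch consistent with the call sites above, assuming nn is keras.layers (the real ResNeXt version uses grouped convolutions; Conv2D's groups argument requires TF >= 2.3):

def resnext_conv3x3(x, in_channels, out_channels, strides, groups, activate, name):
    # in_channels is accepted for API symmetry; Keras infers it from the input
    x = nn.Conv2D(out_channels, kernel_size=3, strides=strides, padding='same',
                  groups=groups, use_bias=False, name=name + "/conv")(x)
    x = nn.BatchNormalization(name=name + "/bn")(x)
    if activate:
        x = nn.Activation('relu', name=name + "/activ")(x)
    return x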
Example 22
def create_model_cnn(input_shape: tuple, nb_classes: int, init_with_imagenet: bool = False, learning_rate: float = 0.01):

    model = Sequential()
    model.add(layers.Conv2D(filters=32, kernel_size=3, input_shape=input_shape, activation='relu', padding='same'))
    model.add(layers.MaxPool2D(pool_size=2))

    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(nb_classes, activation='softmax'))

    loss = losses.categorical_crossentropy
    optimizer = optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)  # 'lr' is deprecated
    model.compile(optimizer, loss, metrics=["accuracy"])

    return model
Example 23
def create_autoencoder_model():
    """
    This function creates convolutional autoencoder model
    """
    inp = layers.Input(shape=(48, 112, 1))  # renamed from 'Input' to avoid shadowing layers.Input
    conv1 = layers.Conv2D(8, 3, activation='relu', padding='same', strides=2)(inp)
    conv2 = layers.Conv2D(4, 3, activation='relu', padding='same', strides=1)(conv1)
    encode = layers.MaxPool2D(pool_size=2)(conv2)
    up1 = layers.UpSampling2D((2, 2))(encode)
    deconv1 = layers.Conv2D(4, 3, activation='relu', padding='same', strides=1)(up1)
    up2 = layers.UpSampling2D((2, 2))(deconv1)
    deconv2 = layers.Conv2D(8, 3, activation='relu', padding='same', strides=1)(up2)
    deconv3 = layers.Conv2D(1, 3, activation='sigmoid', padding='same', strides=1)(deconv2)
    encoder = Model(inp, encode)
    autoencoder = Model(inp, deconv3)
    return encoder, autoencoder
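
A short usage sketch (optimizer and loss are assumptions; with a sigmoid output, binary cross-entropy is a common reconstruction loss):

encoder, autoencoder = create_autoencoder_model()
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# train to reconstruct the input, e.g.:
# autoencoder.fit(x_train, x_train, epochs=10, batch_size=32)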
Example 24
def build_model():
    print('... construct network')
    inputs = layers.Input((60, 120, 3))
    x = layers.Conv2D(32, 9, activation='relu')(inputs)
    x = layers.Conv2D(32, 9, activation='relu')(x)
    x = layers.MaxPool2D((2, 2))(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(640)(x)
    x = layers.Dropout(0.5)(x)
    out1 = layers.Dense(len(APPEARED_LETTERS), activation='softmax')(x)
    out2 = layers.Dense(len(APPEARED_LETTERS), activation='softmax')(x)
    out3 = layers.Dense(len(APPEARED_LETTERS), activation='softmax')(x)
    out4 = layers.Dense(len(APPEARED_LETTERS), activation='softmax')(x)

    return Model(inputs=inputs, outputs=[out1, out2, out3, out4])
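
APPEARED_LETTERS is an assumed global: the character set of a four-character captcha, one softmax head per position. An illustrative definition and compile step (not from the original):

APPEARED_LETTERS = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')  # hypothetical character set

model = build_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # applied to each of the four heads
              metrics=['accuracy'])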
Example 25
def residual_block(x, filters=64, pooling=False, weight=0.1):

    y = layers.Conv2D(filters, 3, activation="relu", padding="same")(x)
    y = layers.BatchNormalization()(y)
    y = layers.Conv2D(filters, 3, activation="relu", padding="same")(y)
    y = layers.BatchNormalization()(y)

    if pooling:
        y = layers.MaxPool2D(2, strides=2)(y)
        residual = layers.Conv2D(filters, 2, strides=2, padding="same")(x)
    else:
        residual = layers.Conv2D(filters, 1, strides=1, padding="same")(x)

    y2 = layers.Lambda(lambda x: x * weight)(y)

    return layers.add([y2, residual])
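
A usage sketch for residual_block (shapes are illustrative):

inputs = layers.Input((32, 32, 64))
x = residual_block(inputs, filters=64)            # same spatial size; 1x1 conv aligns the shortcut
x = residual_block(x, filters=128, pooling=True)  # halves spatial dims on both branch and shortcut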
Example 26
def convnet2(config):
    '''
    Implementation of a simple Convnet for MNIST classification
    INPUTS: config: Config object
            needs to contain fields:
                config['data']['image_size']: image size. MNIST: (28, 28)
                config['model']['filter_sizes']: At least two filter sizes (int)
                config['model']['dense_units']: Number of units in the hidden dense layer
                config['model']['dropout_rates']: Dropout rate per dropout layer
                config['model']['n_classes']: Number of output classes. MNIST: 10.
                config['model']['name']: Model name
    '''
    # Load parameters from config
    image_size = config['data']['image_size']
    filter_sizes = config['model']['filter_sizes']
    dropout_rates = config['model']['dropout_rates']
    dense_units = config['model']['dense_units']
    n_classes = config['model']['n_classes']
    model_name = config['model']['name']

    # assert config to be valid
    assert len(filter_sizes
               ) >= 2, "More filters need to be specified in model {}".format(
                   model_name)
    assert len(dropout_rates) >= 1, "At least one dropout rate must be specified in model {}".format(
        model_name)
    if len(dropout_rates) == 1:
        # if only one dropout rate is specified, use it for both dropout layers
        dropout_rates = [dropout_rates[0] for _ in range(2)]

    # define architecture (you can use Sequential instead)
    inputs = layers.Input((image_size[0], image_size[1], 1))
    c1 = layers.Conv2D(filter_sizes[0], kernel_size=(3, 3),
                       activation='relu')(inputs)
    c2 = layers.Conv2D(filter_sizes[1], kernel_size=(3, 3),
                       activation='relu')(c1)
    p1 = layers.MaxPool2D(pool_size=(2, 2))(c2)
    d1 = layers.Dropout(dropout_rates[0])(p1)
    f1 = layers.Flatten()(d1)
    de1 = layers.Dense(units=dense_units, activation='relu')(f1)
    d2 = layers.Dropout(dropout_rates[1])(de1)
    outputs = layers.Dense(units=n_classes, activation='softmax')(d2)

    # define model
    model = models.Model(inputs, outputs, name=model_name)  # Model.name is read-only in tf.keras; set it via the constructor
    return model
Example 27
def inception_module(x,
                      filters_1x1,
                      filters_3x3_reduce,
                      filters_3x3,
                      filters_5x5_reduce,
                      filters_5x5,
                      filters_pool_proj,
                      name=None):
    conv_1x1 = layers.Conv2D(filters_1x1, (1, 1),
                             padding='same',
                             activation='relu',
                             kernel_initializer=kernel_init,
                             bias_initializer=bias_init)(x)

    conv_3x3 = layers.Conv2D(filters_3x3_reduce, (1, 1),
                             padding='same',
                             activation='relu',
                             kernel_initializer=kernel_init,
                             bias_initializer=bias_init)(x)
    conv_3x3 = layers.Conv2D(filters_3x3, (3, 3),
                             padding='same',
                             activation='relu',
                             kernel_initializer=kernel_init,
                             bias_initializer=bias_init)(conv_3x3)

    conv_5x5 = layers.Conv2D(filters_5x5_reduce, (1, 1),
                             padding='same',
                             activation='relu',
                             kernel_initializer=kernel_init,
                             bias_initializer=bias_init)(x)
    conv_5x5 = layers.Conv2D(filters_5x5, (5, 5),
                             padding='same',
                             activation='relu',
                             kernel_initializer=kernel_init,
                             bias_initializer=bias_init)(conv_5x5)

    pool_proj = layers.MaxPool2D((3, 3), strides=(1, 1), padding='same')(x)
    pool_proj = layers.Conv2D(filters_pool_proj, (1, 1),
                              padding='same',
                              activation='relu',
                              kernel_initializer=kernel_init,
                              bias_initializer=bias_init)(pool_proj)

    output = layers.concatenate([conv_1x1, conv_3x3, conv_5x5, pool_proj],
                                axis=3,
                                name=name)
    return output
Example 28
def residual_connection(x, features_size=True):
    # In Keras, a residual connection is implemented as an identity shortcut
    if x is None:
        x = np.random.random((4, 4, 4, 4))  # a random 4D tensor (randint with a tuple was a bug)
    # if the feature map sizes match
    if features_size:
        y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
        y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
        y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
        y = layers.add([y, x])  # add the original features to the output features
    else:
        y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
        y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
        y = layers.MaxPool2D(2, strides=2)(y)

        residual = layers.Conv2D(128, 1, strides=2, padding='same')(x)
        y = layers.add([y, residual])
    return y  # the original never returned its result
Example 29
def cascade_Net(input_tensor):
    filters = [16, 32, 64, 96, 128, 192, 256, 512]
    conv0 = L.Conv2D(filters[2], (3, 3), padding='same')(input_tensor)
    # conv0 = L.BatchNormalization(axis=-1)(conv0)
    conv0 = L.LeakyReLU(alpha=0.2)(conv0)  # advanced_activations is a legacy module path; LeakyReLU lives directly in layers
    conv0 = cascade_block(conv0, filters[2])
    conv0 = L.MaxPool2D(pool_size=(2, 2), padding='same')(conv0)

    # conv1 = L.Conv2D(filters[4], (1, 1), padding='same')(conv0)
    # conv1 = L.BatchNormalization(axis=-1)(conv1)
    conv1 = L.LeakyReLU(alpha=0.2)(conv0)
    # conv1 = L.GaussianNoise(stddev=0.05)(conv1)
    conv1 = cascade_block(conv1, nb_filter=filters[4])
    # conv2 = L.Conv2D(filters[4], (1,1), padding='same')(conv1)
    conv_flt = L.Flatten()(conv1)
    # conv_flt=L.Dense(512,activation='relu')(conv_flt)
    return conv_flt
Example 30
def ResNet_v1(ly_input, c):
    origin = layers.Conv2D(64,
                           kernel_size=(7, 7),
                           strides=(2, 2),
                           padding='same')(ly_input)  # was mistakenly applied to the builtin 'input'
    origin = BatchAct(origin)

    origin = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                              padding='same')(origin)
    origin = ShortCut(origin, 128, 256)
    origin = ShortCut(origin, 256, 512)
    origin = ShortCut(origin, 512, 1024)
    origin = ShortCut(origin, 1024, 2048)

    feature = layers.GlobalAveragePooling2D()(origin)
    output = layers.Dense(c)(feature)  # apply the layer to 'feature'; use the 'c' argument (the original referenced an undefined 'nb_output' and never called the layer)
    return output
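
BatchAct and ShortCut are helpers defined elsewhere. A loose sketch consistent with the call sites (the bottleneck structure is an assumption, not the original implementation):

def BatchAct(x):
    # batch normalization followed by ReLU, as the name suggests
    x = layers.BatchNormalization()(x)
    return layers.Activation('relu')(x)

def ShortCut(x, mid_channels, out_channels):
    # hypothetical downsampling residual stage
    y = layers.Conv2D(mid_channels, 3, strides=2, padding='same')(x)
    y = BatchAct(y)
    y = layers.Conv2D(out_channels, 3, padding='same')(y)
    residual = layers.Conv2D(out_channels, 1, strides=2, padding='same')(x)
    return BatchAct(layers.add([y, residual]))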