示例#1
0
def get_model_resnext50(img_size, batch_size):
    """Build a binary classifier on a ResNeXt50 backbone.

    Args:
        img_size: Height/width of the square RGB input images.
        batch_size: Fixed batch size baked into the input layer.

    Returns:
        A compiled tf.keras.Model with a single sigmoid output.
    """
    inputs = Input(shape=(img_size, img_size, 3), batch_size=batch_size)

    base_model = ResNeXt50(input_tensor=inputs)
    # Fixed: ``outputs`` is a *list* of tensors; the pooling layers below
    # need the single output tensor.
    x = base_model.output
    out1 = GlobalMaxPooling2D()(x)
    out2 = GlobalAveragePooling2D()(x)
    out3 = Flatten()(x)
    out = Concatenate(axis=-1)([out1, out2, out3])
    out = Dropout(0.5)(out)

    # and a logistic layer
    out = Dense(1, activation="sigmoid", name="3_")(out)

    # Create model.
    model = tf.keras.Model(inputs, out)
    # Fixed: ``use_locking`` is a TF1 optimizer argument and is rejected by
    # tf.keras.optimizers.Adam, so it is not passed.
    model.compile(optimizer=Adam(learning_rate=0.00007,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 name='Adam'),
                  loss='binary_crossentropy',
                  metrics=['acc'])

    return model
    def __init__(self, block_list, initial_filters=64):  # default initial depth is 64
        """Build ResNet18: stem conv + stacked residual blocks + classifier.

        Args:
            block_list: Number of ResnetBlocks per stage (e.g. [2, 2, 2, 2]).
            initial_filters: Filter count of the first stage; doubled per stage.
        """
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)  # number of stages
        self.block_list = block_list
        self.out_filters = initial_filters
        # Stem: first convolution layer
        self.c1 = Conv2D(self.out_filters, (3, 3),
                         strides=1,
                         padding="same",
                         use_bias=False)  # 64 3x3 kernels, stride 1, "same" padding
        self.b1 = BatchNormalization()  # batch normalization
        self.a1 = Activation("relu")  # ReLU activation
        self.blocks = tf.keras.models.Sequential()

        # Build 8 residual blocks; each has 2 conv layers -> 18 layers total.
        # Stage 1 uses identity ("solid line") shortcuts: H(x) = F(x) + x.
        # Stages 2-4 start with a projection ("dashed line") shortcut,
        # H(x) = F(x) + W(x), to downsample, then identity shortcuts.
        for block_id in range(
                len(block_list)):  # stage index; iterations driven by block_list length
            for layer_id in range(block_list[block_id]):  # block index within the stage
                if block_id != 0 and layer_id == 0:  # downsample at the start of every stage but the first
                    block = ResnetBlock(
                        self.out_filters, strides=2,
                        residual_path=True)  # residual_path=True -> projection shortcut
                else:
                    block = ResnetBlock(
                        self.out_filters,
                        residual_path=False)  # residual_path=False -> identity shortcut
                self.blocks.add(block)  # append the block to the network

            self.out_filters *= 2  # double the filter count for the next stage
        self.p1 = GlobalMaxPooling2D()  # global MAX pooling (original comment said "average" — the layer is max)
        self.f1 = Dense(10,
                        activation="softmax",
                        kernel_regularizer=tf.keras.regularizers.l2())  # 10-way classifier head
示例#3
0
    def __init__(self,
                 num_blocks,
                 num_classes,
                 init_ch=16,
                 **kwargs):  # default initial channel depth is 16 (init_ch=16)
        """Inception10: stem conv + stacked Inception blocks + classifier.

        Args:
            num_blocks: Number of blocks; each block is 2 Inception modules.
            num_classes: Output classes for the softmax head.
            init_ch: Channel count of the stem; doubled after every block.
        """
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        self.c1 = ConvBNRelu(init_ch)
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):  # each block consists of 2 Inception modules
                if layer_id == 0:
                    block = InceptionBlk(
                        self.out_channels, strides=2
                    )  # first module of each block uses stride 2, halving the feature-map size
                else:
                    block = InceptionBlk(self.out_channels,
                                         strides=1)  # second module keeps stride 1
                self.blocks.add(block)

            # Enlarge out_channels per block:
            # block_0 uses 16 channels; its 4 branches give 4*16=64 output depth.
            # Doubling here makes block_1 use 32 channels (4*32=128 depth), which
            # feeds the global pooling and the final fully-connected classifier.
            self.out_channels *= 2  # compensate the stride-2 downsampling by doubling depth, keeping information capacity roughly constant
        self.p1 = GlobalMaxPooling2D()  # global pooling
        self.f1 = Dense(num_classes, activation="softmax")  # fully-connected classifier
 def __init__(self, n_hidden=128, num_classes=10, last_linear='cosine'):
     """Small ConvNet feature extractor with a cosine or linear classifier.

     Args:
         n_hidden: Width of the penultimate fully-connected layer.
         num_classes: Number of output classes.
         last_linear: 'cosine' selects a CosineLinear head; anything else a
             bias-free Dense head.
     """
     super(ConvNet, self).__init__()
     self.features = tf.keras.Sequential([
         ConvBlock(in_channels=1,
                   out_channels=32,
                   kernel_size=3,
                   strides=1,
                   padding=0,
                   data_format='channels_last',
                   name='features/conv1'),
         ConvBlock(in_channels=32,
                   out_channels=64,
                   kernel_size=3,
                   strides=1,
                   padding=0,
                   data_format='channels_last',
                   name='features/conv2'),
         ConvBlock(in_channels=64,
                   out_channels=64,
                   kernel_size=3,
                   strides=1,
                   padding=0,
                   data_format='channels_last',
                   # Fixed: was 'features/conv2', duplicating the previous
                   # block's name (Keras requires unique layer names).
                   name='features/conv3'),
         GlobalMaxPooling2D(data_format='channels_last', name='pool'),
         Dense(units=n_hidden, name='features/fc1'),
         ReLU(name='features/relu')
     ])
     if last_linear == 'cosine':
         self.last_linear = CosineLinear(num_features=n_hidden,
                                         num_classes=num_classes)
     else:
         self.last_linear = Dense(num_classes, use_bias=False)
示例#5
0
def convert_to_fcn(model, classes=2, activation='softmax',
                   pooling='avg', features=False, model_type='alexnet'):
    """
    Convert a CNN classifier into a fully-convolutional network (FCN).

    Args:
        model: Source model whose output gets a 6x6 + two 1x1 conv head.
        classes: Number of output classes.
        activation: Activation applied to the final class scores.
        pooling: 'avg' or 'max' channel pooling when features are requested.
        features: If True, return flattened pooled features instead of
            pooled class activations.
        model_type: CNN family ('alexnet', 'vgg16', 'resnet50'); resnet
            variants use 2048 head filters, the others 4096.

    Returns:
        A Model mapping the original input to the FCN head.
    """
    fcn_filters = 2048 if 'resnet' in model_type else 4096
    head = Conv2D(filters=fcn_filters, kernel_size=(6, 6), strides=(1, 1),
                  padding='valid')(model.output)
    head = Conv2D(filters=fcn_filters, kernel_size=(1, 1), strides=(1, 1),
                  padding='valid')(head)
    head = Conv2D(filters=classes, kernel_size=(1, 1), strides=(1, 1),
                  padding='valid')(head)

    if features:
        # Collapse the class axis, then flatten into a feature vector.
        if pooling == 'avg':
            head = Lambda(lambda t: K.mean(t, axis=-1))(head)
        else:
            head = Lambda(lambda t: K.max(t, axis=-1))(head)
        head = Flatten(name='fcn_features')(head)
    else:
        head = GlobalMaxPooling2D()(head)
        head = Activation(activation)(head)
    return Model(model.input, head)
def resnet_infogan_discriminator(latent_vars=8, size=128):
    """Spectral-norm ResNet discriminator with an InfoGAN latent head.

    Returns a Model producing [real/fake score, latent reconstruction].
    """
    img_in = Input(shape=(size, size, 1), name="disc_img_in")

    # Downsampling residual trunk.
    feat = res_block(64, norm="spectral")(img_in)
    for width in (64, 64, 128, 256, 512):
        feat = res_block(width, stride=2, norm="spectral")(feat)
    pooled = Concatenate()([GlobalAveragePooling2D()(feat),
                            GlobalMaxPooling2D()(feat)])

    # Latent-code regression head (InfoGAN Q network).
    latent = dense_block(64, norm="spectral")(pooled)
    latent = dense_block(64, norm="spectral")(latent)
    latent_out = SNDense(latent_vars, activation="linear")(latent)

    # Real/fake branch, augmented with the minibatch standard deviation.
    score = Concatenate()([pooled, BatchStd()(pooled)])
    score = dense_block(256, norm="spectral")(score)
    score = SNDense(1, activation='linear')(score)

    return Model(inputs=[img_in], outputs=[score, latent_out], name="disc")
示例#7
0
    def _channel_attention(_inputs, cbam_ratio=8):
        """CBAM channel attention: gate channels using a shared MLP over
        average- and max-pooled descriptors."""
        channel = K.int_shape(_inputs)[-1]

        bottleneck = Dense(channel // cbam_ratio,
                           activation='relu',
                           kernel_initializer='he_normal',
                           use_bias=True,
                           bias_initializer='zeros',
                           name=name + '_sl1')
        expand = Dense(channel,
                       kernel_initializer='he_normal',
                       use_bias=True,
                       bias_initializer='zeros',
                       name=name + '_sl2')

        def _mlp(pooled):
            # Shared two-layer MLP applied to a pooled channel descriptor.
            pooled = Reshape((1, 1, channel))(pooled)
            return expand(bottleneck(pooled))

        avg_branch = _mlp(GlobalAveragePooling2D(name=name + '_gap')(_inputs))
        max_branch = _mlp(GlobalMaxPooling2D(name=name + '_gmp')(_inputs))

        gate = Add()([avg_branch, max_branch])
        gate = _activation(gate,
                           activation='sigmoid',
                           name=name + '_sigmoid')

        return Multiply()([_inputs, gate])
示例#8
0
    def __init__(self, include_top=True, input_tensor=None, input_shape=None, pooling=None, classes=8631):
        """Build a VGGFace-style ResNet50 functional model.

        Args:
            include_top: Passed through to ResNet50Component; also forces
                require_flatten during input-shape validation.
            input_tensor: Optional existing tensor to use as the model input.
            input_shape: Optional explicit input shape (validated below).
            pooling: Optional 'avg' / 'max' global pooling of the features.
            classes: Number of identity classes (unused in the visible code).
        """
        super(ResNet50Model, self).__init__()
        self.include_top = include_top

        input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=K.image_data_format(),
                                          require_flatten=include_top)

        if input_tensor is None:
            img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                img_input = input_tensor

        x = ResNet50Component(include_top=include_top)(img_input)
        # x = AveragePooling2D((7, 7), name='avg_pool')(x)

        # NOTE(review): Flatten() runs unconditionally, so the tensor is 2-D
        # before the pooling branches below; GlobalAveragePooling2D /
        # GlobalMaxPooling2D expect 4-D input and would fail whenever
        # ``pooling`` is set — confirm whether Flatten should apply only
        # when include_top is True.
        x = Flatten()(x)

        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

        if input_tensor is not None:
            inputs = get_source_inputs(input_tensor)
        else:
            inputs = img_input

        # Re-run Model.__init__ with the assembled functional graph (the
        # bare super() call above only performed base initialisation).
        super(ResNet50Model, self).__init__(inputs, x, name='vggface_resnet50')
示例#9
0
def build_model():
    """Construct a small size-agnostic CNN for 6-way classification."""
    img_in = Input(shape=(None, None, 3))

    net = Conv2D(32, (3, 3), activation='selu')(img_in)
    net = MaxPooling2D(pool_size=(2, 2))(net)

    net = Conv2D(32, (3, 3), activation='selu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)

    net = Conv2D(64, (4, 4), activation='selu')(net)
    net = MaxPooling2D(pool_size=(2, 2))(net)
    net = Dropout(0.5)(net)

    net = Conv2D(128, (1, 1))(net)

    # Global pooling keeps the network independent of input resolution.
    net = GlobalMaxPooling2D()(net)

    # Fully-connected classifier head.
    net = Dense(64, activation='selu')(net)
    net = Dropout(0.5)(net)
    probs = Dense(6, activation='softmax')(net)

    return Model(inputs=img_in, outputs=probs)
示例#10
0
def get_model(n_classes=1):
    """ResNet50-based classifier.

    Uses a sigmoid + binary cross-entropy head when n_classes == 1,
    otherwise softmax + sparse categorical cross-entropy.
    """
    backbone = ResNet50(weights='imagenet', include_top=False)

    head = GlobalMaxPooling2D()(backbone.output)
    head = Dropout(0.5)(head)
    head = Dense(100, activation="relu")(head)
    head = Dropout(0.5)(head)

    binary = n_classes == 1
    head = Dense(n_classes,
                 activation="sigmoid" if binary else "softmax")(head)

    model = Model(backbone.input, head, name="base_model")
    model.compile(
        loss="binary_crossentropy" if binary else "sparse_categorical_crossentropy",
        metrics=['acc'],
        optimizer="adam")

    return model
示例#11
0
def generate_model(hyper_space):
    """Build a 5-class VGG16 transfer-learning model from a hyperparameter dict.

    Expected keys: 'last_layer', 'qtd_conv', 'qtd_dense', 'neurons',
    'pooling', 'dropout', 'optmizer'.

    Returns:
        A compiled Model with the VGG16 backbone frozen.
    """
    model = VGG16(weights='imagenet', include_top=False)
    x = model.get_layer(hyper_space['last_layer']).output

    # Freeze the pretrained backbone.
    model.trainable = False
    for _ in range(hyper_space['qtd_conv']):
        # Fixed: Keras 2 takes kernel_size as one argument. The original
        # Keras-1 style ``Convolution2D(512, 3, 3)`` is parsed by Keras 2 as
        # kernel_size=3, strides=3 — a stride-3 conv instead of a 3x3 kernel.
        x = Convolution2D(512, (3, 3), activation='relu')(x)

    if hyper_space['pooling'] == 'AVG':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    else:
        x = GlobalMaxPooling2D(name='max_pool')(x)

    for _ in range(hyper_space['qtd_dense']):
        x = Dense(hyper_space['neurons'], activation='relu')(x)
    # NOTE(review): one extra dense layer beyond qtd_dense, kept from the
    # original — confirm it is intentional.
    x = Dense(hyper_space['neurons'], activation='relu')(x)
    x = Dropout(hyper_space['dropout'])(x)

    x = Dense(5, activation='softmax')(x)

    for layer in model.layers:
        layer.trainable = False

    model_final = Model(inputs=model.inputs, outputs=x)
    model_final.compile(optimizer=hyper_space['optmizer'],
                        loss="categorical_crossentropy",
                        metrics=["accuracy"])

    return model_final
示例#12
0
    def __init__(self, learning_rate, epsilon):
        """Wrap an EfficientNetB7 backbone with a 10-way softmax head.

        inputs:
        -learning rate (float): learning rate of the Nadam optimizer
        -epsilon (float): amount of added noise in the adversarial objective function
        """
        super().__init__()

        self.c_param = 0
        self.learning_rate = learning_rate
        self.epsilon = epsilon

        self.effnet_base = EfficientNetB7(weights="imagenet",
                                          include_top=False,
                                          input_shape=(32, 32, 3))
        self.effnet_base.trainable = True

        # Head: pool the penultimate backbone layer, regularise, classify.
        features = self.effnet_base.layers[-2].output
        features = GlobalMaxPooling2D()(features)
        features = Dropout(0.2)(features)
        features = Dense(32)(features)
        logits = Dense(10)(features)
        probs = Activation("softmax")(logits)
        self.model = Model(inputs=self.effnet_base.inputs, outputs=[probs])

        self.loss = tf.keras.losses.CategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.Nadam(self.learning_rate)
示例#13
0
    def __init__(self):
        """Init method.

        Builds a shallow CNN binary classifier for 40x40 RGB inputs.
        """
        self.num_train_samples = 0
        self.num_feat = 1
        self.num_labels = 1
        self.is_trained = False

        # Same layer sequence as incremental add() calls, expressed as a list.
        self.model = Sequential([
            Conv2D(8, (3, 3), input_shape=(40, 40, 3)),
            Activation('relu'),
            BatchNormalization(axis=-1, epsilon=2e-5, momentum=0.9),
            Conv2D(16, (1, 1), strides=(2, 2), kernel_regularizer=l2(0.0001)),
            BatchNormalization(axis=-1, epsilon=2e-5, momentum=0.9),
            Conv2D(32, (2, 2), strides=(1, 1), kernel_regularizer=l2(0.0001)),
            Activation('relu'),
            BatchNormalization(axis=-1, epsilon=2e-5, momentum=0.9),
            Conv2D(32, (1, 1), strides=(1, 1), kernel_regularizer=l2(0.0001)),
            Activation('relu'),
            MaxPooling2D(pool_size=(2, 2)),
            GlobalMaxPooling2D(),
            Dense(1),
            Activation('sigmoid'),
        ])

        self.model.compile(loss='binary_crossentropy', optimizer='adam',
                           metrics=['accuracy'])
示例#14
0
def cbam_block(x, ratio=8):
    """CBAM: channel attention followed by spatial attention."""
    n_channels = x.shape[-1]

    def channel_mlp(pooled):
        # Bottleneck MLP over a pooled channel descriptor. Note each call
        # creates fresh Dense layers (the two branches do NOT share weights),
        # matching the original implementation.
        pooled = Reshape((1, 1, n_channels))(pooled)
        pooled = Dense(n_channels // ratio)(pooled)
        pooled = Activation('relu')(pooled)
        return Dense(n_channels)(pooled)

    avg_desc = channel_mlp(GlobalAveragePooling2D()(x))
    max_desc = channel_mlp(GlobalMaxPooling2D()(x))

    channel_gate = Activation('sigmoid')(Add()([avg_desc, max_desc]))
    refined = Multiply()([x, channel_gate])

    # Spatial attention: pool across channels, 7x7 conv, sigmoid gate.
    spatial_avg = Lambda(lambda t: K.mean(t, axis=3, keepdims=True))(refined)
    spatial_max = Lambda(lambda t: K.max(t, axis=3, keepdims=True))(refined)
    stacked = Concatenate(axis=3)([spatial_avg, spatial_max])
    spatial_gate = Conv2D(filters=1, kernel_size=7, strides=1,
                          padding='same')(stacked)
    spatial_gate = Activation('sigmoid')(spatial_gate)

    return Multiply()([refined, spatial_gate])
def SR_model(num_classes,
             dropout,
             mc_dropout,
             input_dim,
             training,
             pooling='avg'):
    """EfficientNetB0 classifier with Monte-Carlo dropout in the head."""
    img_in = Input(input_dim)
    backbone = EfficientNetB0(include_top=False,
                              weights='imagenet',
                              input_tensor=img_in)
    backbone.trainable = True

    h = backbone.output
    h = Dropout(dropout, name='top_dropout_1')(h, training=training)
    if pooling == 'avg':
        h = GlobalAveragePooling2D(name='avg_pool')(h)
    elif pooling == 'max':
        h = GlobalMaxPooling2D(name='max_pool')(h)
    h = Dropout(dropout, name='top_dropout_2')(h, training=training)
    h = Dense(512, activation='relu', name='dense_512')(h)
    h = BatchNormalization()(h)
    h = Dropout(dropout, name='top_dropout_3')(h, training=training)
    # Always-on dropout so predictions can be sampled at inference time.
    h = Lambda(lambda t: K.dropout(t, level=mc_dropout))(h)

    # classification head (f)
    sr = Dense(num_classes, activation='softmax', name='dense_f')(h)
    return Model(inputs=img_in, outputs=sr)
def get_model(base_model,
              layer,
              input_shape,
              classes=6,
              activation="softmax",
              dropout=None,
              pooling="avg",
              weights=None,
              pretrained="imagenet"):
    """Attach a pooling + dense head to a backbone factory, optionally load
    weights, and freeze layers up to the given index."""
    backbone = base_model(input_shape=input_shape,
                          include_top=False,
                          weights=pretrained)
    if pooling == "avg":
        head = GlobalAveragePooling2D()(backbone.output)
    elif pooling == "max":
        head = GlobalMaxPooling2D()(backbone.output)
    elif pooling is None:
        head = Flatten()(backbone.output)
    if dropout is not None:
        head = Dropout(dropout)(head)
    head = Dense(classes, activation=activation)(head)
    model = Model(inputs=backbone.input, outputs=head)
    if weights is not None:
        model.load_weights(weights)
    # Freeze everything below the requested layer index.
    for frozen in model.layers[:layer]:
        frozen.trainable = False
    return model
示例#17
0
 def __init__(self, lr, num_Classes, model_name):
     """Stack a frozen ImageNet backbone with a dense sigmoid head.

     Unknown model names print a warning and fall through to the ResNet50
     default, as in the original.
     """
     self.model_name = model_name
     self.model = Sequential()
     if model_name not in ['VGG', 'Inception', 'Resnet']:
         print("Pretrained Model is Not Available")
     if model_name == 'VGG':
         base_model = VGG16(include_top=False, weights='imagenet')
     elif model_name == 'Inception':
         base_model = InceptionV3(include_top=False, weights='imagenet')
     else:
         base_model = ResNet50(include_top=False, weights='imagenet')
     base_model.trainable = False
     self.model.add(base_model)
     self.model.add(GlobalMaxPooling2D())
     self.model.add(Dropout(0.3))
     # Tapering dense stack: 512 -> 256 -> 128, each followed by dropout.
     for width in (512, 256, 128):
         self.model.add(Dense(width, activation='relu'))
         self.model.add(Dropout(0.2))
     self.model.add(Dense(64, activation='relu'))
     self.model.add(Dense(num_Classes, activation='sigmoid'))
     print(self.model.summary())
     self.model.compile(optimizer=Adam(lr),
                        loss=binary_crossentropy,
                        metrics=['accuracy'])
示例#18
0
 def getModel():
     """DenseNet-style CIFAR classifier: three dense-block stages separated
     by transition blocks, then global max pooling and a softmax head."""
     net_in = net = Input([32, 32, 3])
     net = Conv2D(18,
                  kernel_size=6,
                  strides=1,
                  activation='relu',
                  padding='valid')(net)
     # Stage 1: three 6-layer dense blocks with selu activations.
     for _ in range(3):
         net = denseBlock(net, 6, 18, 1, 'same', 'selu')
     net = transition_block(net, 6, 18, 'same', 'selu', 'avg', 5)
     # Stage 2: four 2-layer dense blocks with relu activations.
     for _ in range(4):
         net = denseBlock(net, 2, 18, 1, 'same', 'relu')
     net = transition_block(net, 2, 18, 'same', 'relu', 'avg', 2)
     # Stage 3: five 5-layer dense blocks with selu activations.
     for _ in range(5):
         net = denseBlock(net, 5, 18, 1, 'same', 'selu')
     net = transition_block(net, 5, 18, 'same', 'selu', 'max', 4)
     net = GlobalMaxPooling2D()(net)
     net = Dense(10, activation='softmax')(net)
     return Model(inputs=net_in, outputs=net)
示例#19
0
def Conv2DModel(model_name,
                input_shape,
                kernel_col,
                kernels=64,
                kernel_rows=3,
                learning_rate=0.01,
                regularization=None,
                dropout=None):
    """Single-convolution binary classifier.

    One Conv2D layer (kernel_rows x kernel_col kernels), optional dropout,
    relu, global max pooling and a sigmoid output; compiled via
    compileModel().
    """
    K.clear_session()

    net = Sequential(name=model_name)

    net.add(
        Conv2D(kernels, (kernel_rows, kernel_col),
               strides=(1, 1),
               input_shape=input_shape,
               kernel_regularizer=regularization,
               name='conv0'))
    # Dropout only when an explicit float rate was supplied.
    if type(dropout) is float:
        net.add(Dropout(dropout))
    net.add(Activation('relu'))

    net.add(GlobalMaxPooling2D())
    net.add(Flatten())
    net.add(Dense(1, activation='sigmoid', name='fc1'))

    print(net.summary())
    compileModel(net, learning_rate)
    return net
示例#20
0
def FCN_model(len_classes=10, dropout_rate=0.2):
    """Build a fully-convolutional classifier for variable-size grayscale input.

    Args:
        len_classes: Number of output classes.
        dropout_rate: Dropout rate applied after each stage.

    Returns:
        An uncompiled Keras Model emitting softmax class probabilities.
    """
    # Renamed from ``input`` to avoid shadowing the builtin.
    inputs = Input(shape=(None, None, 1))

    x = Conv2D(32, (3, 3), activation='relu')(inputs)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropout_rate)(x)
    x = BatchNormalization()(x)

    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropout_rate)(x)
    x = BatchNormalization()(x)

    x = Conv2D(100, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(dropout_rate)(x)
    x = BatchNormalization()(x)

    # 1x1 conv to class maps, then global pooling keeps the network
    # independent of input resolution.
    x = Conv2D(len_classes, (1, 1))(x)
    x = Dropout(dropout_rate)(x)
    x = BatchNormalization()(x)
    x = GlobalMaxPooling2D()(x)
    predictions = Activation('softmax')(x)

    model = Model(inputs=inputs, outputs=predictions)

    print(model.summary())
    print(f'Total number of layers for FCN: {len(model.layers)}')

    return model
示例#21
0
    def init_model(self):
        '''
        Build the coordinate-regression CNN and store it on the instance.
        '''
        img_in = Input(shape=(self.image_size, self.image_size, 3))
        net = img_in
        # Two conv stages (32 then 64 filters) with pooling, BN and dropout.
        for n_filters in (32, 64):
            net = Conv2D(n_filters, (3, 3), activation='relu')(net)
            net = MaxPooling2D(2, 2)(net)
            net = BatchNormalization()(net)
            net = Dropout(0.2)(net)
        # Final conv stage without dropout, then global pooling.
        net = Conv2D(128, (3, 3), activation='relu')(net)
        net = MaxPooling2D(2, 2)(net)
        net = BatchNormalization()(net)
        net = GlobalMaxPooling2D()(net)
        net = Dense(1024,
                    kernel_regularizer=l2(0.01),
                    bias_regularizer=l2(0.01),
                    activation='relu')(net)
        net = Dropout(0.2)(net)
        net = Dense(2)(net)  # output is a dense of size 2: (x, y)

        self.model = Model(inputs=img_in, outputs=net)
        print(self.model.summary())
0
def get_model_cnn(
        input_shape=(None, None, 1),
        n_classes=10,
):
    """Return (classifier, auxiliary 2-d embedding model) sharing a conv trunk."""
    net_in = Input(input_shape)

    trunk = Convolution2D(64, kernel_size=3, activation="relu")(net_in)
    trunk = MaxPooling2D(pool_size=2)(trunk)

    trunk = Convolution2D(64, kernel_size=3, activation="relu")(trunk)
    trunk = MaxPooling2D(pool_size=2)(trunk)

    trunk = Convolution2D(64, kernel_size=3, activation="relu")(trunk)
    trunk = GlobalMaxPooling2D()(trunk)

    # 2-d linear bottleneck used as the auxiliary output.
    embedding = Dense(2, activation="linear")(trunk)
    logits = Dense(10, activation="relu")(embedding)
    logits = Dense(n_classes, activation="softmax")(logits)

    model = Model(net_in, logits)
    model.compile(optimizer=Adam(0.0001),
                  loss=categorical_crossentropy,
                  metrics=["acc"])

    model_aux = Model(net_in, embedding)
    model_aux.compile(optimizer=Adam(0.0001),
                      loss=categorical_crossentropy,
                      metrics=["acc"])

    model.summary()

    return model, model_aux
示例#23
0
 def getModel():
     """CIFAR-10 classifier mixing identity blocks and strided convolutions."""
     net_in = net = Input([32, 32, 3])
     net = id_block(net, 4, 3, 'selu')
     net = Conv2D(18,
                  kernel_size=6,
                  strides=2,
                  activation='selu',
                  padding='valid')(net)
     net = id_block(net, 4, 18, 'relu')
     net = Conv2D(36,
                  kernel_size=7,
                  strides=3,
                  activation='tanh',
                  padding='same')(net)
     net = id_block(net, 5, 36, 'tanh')
     net = AveragePooling2D(pool_size=2, strides=2, padding='same')(net)
     net = Conv2D(72,
                  kernel_size=3,
                  strides=2,
                  activation='selu',
                  padding='valid')(net)
     net = GlobalMaxPooling2D()(net)
     net = Dense(10, activation='softmax')(net)
     return Model(inputs=net_in, outputs=net)
示例#24
0
 def getModel():
     """Conv + dense-block CIFAR-10 classifier."""
     net_in = net = Input([32, 32, 3])
     net = Conv2D(18,
                  kernel_size=2,
                  strides=2,
                  activation='tanh',
                  padding='valid')(net)
     net = MaxPooling2D(pool_size=7, strides=5, padding='same')(net)
     net = Conv2D(36,
                  kernel_size=4,
                  strides=3,
                  activation='selu',
                  padding='same')(net)
     # Two identical 5-layer dense blocks followed by a transition block.
     for _ in range(2):
         net = denseBlock(net, 5, 36, 3, 'same', 'relu')
     net = transition_block(net, 5, 36, 'same', 'relu', 'max', 1)
     net = Conv2D(72,
                  kernel_size=5,
                  strides=5,
                  activation='tanh',
                  padding='same')(net)
     net = GlobalMaxPooling2D()(net)
     net = Dense(10, activation='softmax')(net)
     return Model(inputs=net_in, outputs=net)
示例#25
0
def conv2d_cnn():
    """Train a Conv2D-over-embeddings text classifier.

    Relies on module-level globals: output_dim, max_len, filters,
    kernel_size, x_train, y_train, epochs, batch_size.

    Returns:
        (model, history): the trained model and its fit history.
    """
    model = Sequential()
    model.add(
        Embedding(input_dim=2**16, output_dim=output_dim,
                  input_length=max_len))
    model.add(
        Reshape((max_len, output_dim, 1), input_shape=(max_len, output_dim)))
    model.add(
        Conv2D(filters=filters,
               kernel_size=(kernel_size, output_dim),
               strides=(1, 1),
               padding='valid'))
    model.add(GlobalMaxPooling2D())

    model.add(Dense(2**6))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))

    model.add(Dense(10))
    model.add(Activation('softmax'))
    adam = optimizers.Adam(learning_rate=0.001)
    # Fixed: a 10-way softmax output requires categorical cross-entropy;
    # binary_crossentropy here averages per-output sigmoid-style losses and
    # reports misleadingly high accuracy.
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy'])
    model.summary()
    early_stopping = EarlyStopping(patience=10)
    history = model.fit(x_train,
                        y_train,
                        validation_split=0.1,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=1,
                        callbacks=[early_stopping])

    return model, history
示例#26
0
 def getModel():
     """Two conv blocks, global max pooling, softmax over 10 classes."""
     net_in = net = Input([32, 32, 3])
     net = conv_block(net, 6, 18, 'relu', 1)
     net = conv_block(net, 7, 36, 'selu', 7)
     net = GlobalMaxPooling2D()(net)
     net = Dense(10, activation='softmax')(net)
     return Model(inputs=net_in, outputs=net)
示例#27
0
def build_model(in_layer, filters, n_out):
    """Shared residual trunk with two identical regression heads (vp, vs)."""
    trunk = Resblock(in_layer,
                     filters,
                     batch_activate_output=False,
                     name='Resblock1')
    trunk = Resblock(trunk, filters, name='Resblock2')
    trunk = MaxPool2D(name='Pool1')(trunk)
    trunk = Dropout(0.1)(trunk)

    trunk = Resblock(trunk, filters * 2, name='Resblock3', after_pool=True)
    trunk = MaxPool2D(name='Pool2')(trunk)
    trunk = Dropout(0.1)(trunk)

    trunk = Resblock(trunk, filters * 4, name='Resblock4', after_pool=True)
    trunk = MaxPool2D(name='Pool3')(trunk)
    trunk = Dropout(0.1)(trunk)

    trunk = Resblock(trunk, filters * 2, name='Resblock5', after_pool=True)
    trunk = Resblock(trunk, filters, name='Resblock6', after_pool=True)

    def _regression_head(tag):
        # 1x1 conv -> dense -> global max pool -> linear output per target.
        h = Conv2D(512, (1, 1), strides=(1, 1),
                   kernel_initializer='he_normal')(trunk)
        h = Dense(256,
                  activation='relu',
                  name='Dense_' + tag,
                  kernel_initializer='he_normal')(h)
        h = GlobalMaxPooling2D(name='Pool_' + tag)(h)
        return Dense(n_out,
                     activation='linear',
                     name=tag + '_output',
                     kernel_initializer='he_normal')(h)

    return _regression_head('vp'), _regression_head('vs')
示例#28
0
 def __init__(self):
     """GAN discriminator layers: two strided convs -> global max pool -> score.

     Layers are stored as public attributes; the forward pass is defined
     elsewhere (not visible in this chunk).
     """
     super(Discriminator, self).__init__()
     self.layer_1 = Conv2D(64, (3, 3), strides=(2, 2), padding='same')
     self.layer_2 = LeakyReLU(alpha=0.2)
     self.layer_3 = Conv2D(128, (3, 3), strides=(2, 2), padding='same')
     self.layer_4 = LeakyReLU(alpha=0.2)
     self.layer_5 = GlobalMaxPooling2D()
     self.layer_6 = Dense(1)  # raw real/fake logit
 def train_model(self):
     """Fine-tune an Xception regressor on normalised bone-age targets.

     Uses early stopping, best-weights checkpointing and LR reduction on
     plateau; after training, reloads the best checkpoint and converts
     normalised predictions back to months. Reads training/validation
     arrays and data generators from ``self``.
     """
     print("Training the model")
     LR = 1e-3
     epochs = 200
     callbacks = [
         EarlyStopping(monitor='val_loss',
                       min_delta=0,
                       patience=30,
                       verbose=0,
                       mode='auto'),
         ModelCheckpoint('model.h5',
                         monitor='val_loss',
                         mode='min',
                         save_best_only=True),
         ReduceLROnPlateau(monitor='val_loss',
                           factor=0.1,
                           patience=10,
                           verbose=0,
                           mode='auto',
                           min_delta=0.0001,
                           cooldown=0,
                           min_lr=0)
     ]
     # Pre trained model Xception without fully connected layers
     base_model = Xception(input_shape=(self.img_size[0], self.img_size[1],
                                        3),
                           include_top=False,
                           weights='imagenet')
     # Unfreeze the layers so the whole backbone is fine-tuned.
     base_model.trainable = True
     x = GlobalMaxPooling2D()(base_model.output)
     x = Dense(512, activation='relu')(x)
     x = Dense(10, activation='relu')(x)
     output = Dense(1, activation='linear')(x)
     model = Model(inputs=base_model.input, outputs=output)
     model.compile(loss='mse',
                   optimizer=Adam(learning_rate=LR),
                   metrics=[self.mae_in_months])
     print(base_model.summary())
     print(model.summary())
     # NOTE(review): fit_generator is deprecated in TF2 (Model.fit accepts
     # generators directly) — confirm the installed TF version supports it.
     history = model.fit_generator(
         self.train_datagen.flow(self.x_train,
                                 self.y_train,
                                 batch_size=self.batch_size),
         steps_per_epoch=len(self.x_train) / self.batch_size,
         validation_data=self.val_datagen.flow(self.x_val,
                                               self.y_val,
                                               batch_size=self.batch_size),
         validation_steps=len(self.x_val) / self.batch_size,
         callbacks=callbacks,
         epochs=epochs,
         verbose=1)
     self.plot_it(history)
     # Restore the best checkpoint saved by ModelCheckpoint.
     model.load_weights('model.h5')
     # De-normalise predictions and targets back to months.
     pred = self.mean_bone_age + self.std_bone_age * (model.predict(
         self.x_val, batch_size=self.batch_size, verbose=True))
     actual = self.mean_bone_age + self.std_bone_age * (self.y_val)
     # NOTE(review): ``pred`` / ``actual`` are computed but neither returned
     # nor stored — confirm whether evaluation/plotting was intended here.
示例#30
0
 def getModel():
     """Three strided convolutions + global max pooling for CIFAR-10."""
     net_in = net = Input([32, 32, 3])
     net = Conv2D(18, kernel_size=2, strides=2, activation='relu',
                  padding='valid')(net)
     net = Conv2D(36, kernel_size=5, strides=3, activation='selu',
                  padding='valid')(net)
     net = Conv2D(72, kernel_size=6, strides=5, activation='tanh',
                  padding='same')(net)
     net = GlobalMaxPooling2D()(net)
     net = Dense(10, activation='softmax')(net)
     return Model(inputs=net_in, outputs=net)