def feature_extractor_network(self):
    """Build the feature-extractor CNN and return it as a Keras Model.

    Architecture: two conv stages (C1, C3) with BatchNorm + LeakyReLU,
    a max-pooling stage (M2), two locally-connected stages (L4, L5) and
    two dense stages (F6, F7). Output is a tanh-activated feature map.

    Returns:
        keras.Model mapping an input image to the extracted features.
    """
    # NOTE(review): the original referenced an undefined `in_shape`;
    # `self.img_shape` is used here, consistent with the sibling
    # discriminator_network — confirm against the class constructor.
    in_image = Input(shape=self.img_shape)

    # C1 layer: 5x5 conv + BN + LeakyReLU
    nett = Conv2D(32, (5, 5))(in_image)
    nett = BatchNormalization()(nett)
    nett = LeakyReLU(alpha=0.2)(nett)

    # M2 layer: 3x3 max-pooling
    nett = MaxPooling2D(pool_size=(3, 3))(nett)

    # C3 layer. Bug fixes vs. original: the Conv2D was never applied to
    # the tensor (missing `(nett)`), and BatchNormalization was given a
    # `pool_size` argument it does not accept.
    nett = Conv2D(64, (3, 3))(nett)
    nett = BatchNormalization()(nett)
    nett = LeakyReLU(alpha=0.2)(nett)

    # L4 layer
    nett = LocallyConnected2D(128, (3, 3))(nett)

    # L5 layer
    nett = LocallyConnected2D(256, (3, 3))(nett)

    # F6 layer
    nett = Dense(512, activation='relu')(nett)
    nett = Dropout(0.2)(nett)

    # F7 layer. Dense requires a unit count; the original omitted it.
    # 512 chosen to match F6 — TODO confirm the intended feature dimension.
    out_features = Dense(512, activation='tanh')(nett)

    # output
    model = Model(inputs=in_image, outputs=out_features)
    return model
def _create_model_2(self, input_tensor, input_shape, num_classes, dropout_rate):
    """Build a small CNN classifier (Sequential API) and apply it to a tensor.

    Two conv/pool stages, a 1024-unit dense layer with dropout, then a
    sigmoid head for binary (num_classes == 1) or a softmax head for
    multi-class (num_classes >= 2) classification.

    Args:
        input_tensor: Keras tensor the finished model is applied to.
        input_shape: full batched shape; `input_shape[1:]` feeds the first layer.
        num_classes: number of output classes; must be >= 1.
        dropout_rate: dropout probability before the output layer.

    Returns:
        The output tensor of the model applied to `input_tensor`.

    Raises:
        ValueError: if `num_classes` < 1.
    """
    # Bug fix vs. original: the invalid-count branch used
    # `assert num_classes > 0`, which always fails when reached and is
    # stripped entirely under `python -O`. Validate up front instead.
    if num_classes < 1:
        raise ValueError('Invalid number of classes: {}'.format(num_classes))

    model = Sequential()
    model.add(
        Conv2D(32, kernel_size=(5, 5), padding='same', activation='relu',
               input_shape=input_shape[1:]))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(
        Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(dropout_rate))

    if 1 == num_classes:
        # Binary classification: single sigmoid unit.
        model.add(Dense(1, activation='sigmoid'))
    else:
        # Multi-class classification: softmax over num_classes units.
        model.add(Dense(num_classes, activation='softmax'))

    return model(input_tensor)
def _create_model_1(self, input_tensor, num_classes, dropout_rate):
    """Build a small CNN classifier (functional API) on top of a tensor.

    Same topology as `_create_model_2`: two conv/pool stages, a 1024-unit
    dense layer with dropout, then a sigmoid head (num_classes == 1) or a
    softmax head (num_classes >= 2).

    Args:
        input_tensor: Keras tensor the layers are chained onto.
        num_classes: number of output classes; must be >= 1.
        dropout_rate: dropout probability before the output layer.

    Returns:
        The output tensor of the classifier head.

    Raises:
        ValueError: if `num_classes` < 1.
    """
    # Bug fix vs. original: the invalid-count branch used
    # `assert num_classes > 0`, which always fails when reached and is
    # stripped entirely under `python -O`. Validate up front instead.
    if num_classes < 1:
        raise ValueError('Invalid number of classes: {}'.format(num_classes))

    x = Conv2D(32, kernel_size=(5, 5), padding='same',
               activation='relu')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Conv2D(64, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(dropout_rate)(x)

    if 1 == num_classes:
        # Binary classification: single sigmoid unit.
        x = Dense(1, activation='sigmoid')(x)
    else:
        # Multi-class classification: softmax over num_classes units.
        x = Dense(num_classes, activation='softmax')(x)

    return x
def zero_padding_block(input_tensor, filters, stage, block, se_enabled=False, se_ratio=16):
    """ResNet downsampling block whose shortcut pads channels with zeros.

    Main path: stride-2 3x3 conv, BN, ReLU, 3x3 conv (+ optional
    squeeze-and-excitation), BN. Shortcut path: stride-2 1x1 conv followed
    by a zero-padding Lambda (`zeropad`) so channel counts match for the
    final Add.

    Args:
        input_tensor: input Keras tensor.
        filters: pair (numFilters1, numFilters2) for the two convs.
        stage, block: identifiers used only for layer naming.
        se_enabled: if True, apply `squeeze_excitation_block` to the main path.
        se_ratio: reduction ratio for the SE block.

    Returns:
        Output tensor after the residual addition and final ReLU.
    """
    numFilters1, numFilters2 = filters

    # BatchNorm axis depends on the backend's channel ordering.
    if K.image_data_format() == 'channels_last':
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_name_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # downsampling directly by convolution with stride 2
    x = Conv2D(numFilters1, (3, 3), strides=(2, 2),
               kernel_initializer='he_normal',
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    # Bug fix vs. original: without padding='same' this conv shrinks the
    # spatial size, so the Add() against the stride-2 shortcut could never
    # shape-match.
    x = Conv2D(numFilters2, (3, 3), padding='same',
               kernel_initializer='he_normal',
               name=conv_name_base + '2b')(x)

    # squeeze and excitation block
    if se_enabled:
        x = squeeze_excitation_block(x, ratio=se_ratio)

    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)

    # zero padding and downsampling with 1x1 conv shortcut connection
    x_shortcut = Conv2D(1, (1, 1), strides=(2, 2),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '1')(input_tensor)
    # NOTE(review): this max-pooled tensor was computed but never used in
    # the original (the Lambda below consumes the conv output instead).
    # Kept and flagged rather than silently repurposed — confirm intent.
    # Bug fix vs. original: `border_mode` is removed Keras-1 API; Keras 2
    # uses `padding`.
    x_shortcut2 = MaxPooling2D(pool_size=(1, 1), strides=(2, 2),
                               padding='same')(input_tensor)
    x_shortcut = Lambda(zeropad, output_shape=zeropad_output_shape)(x_shortcut)
    x_shortcut = BatchNormalization(axis=bn_axis,
                                    name=bn_name_base + '1')(x_shortcut)

    # addition of shortcut
    x = Add()([x, x_shortcut])
    x = Activation('relu')(x)

    return x
def projection_bottleneck_block(input_tensor, filters, stage, block, se_enabled=False, se_ratio=16):
    """ResNet bottleneck block with a strided 1x1 projection shortcut.

    Main path: 1x1 stride-2 conv -> BN -> ReLU -> 3x3 'same' conv -> BN ->
    ReLU -> 1x1 conv (+ optional squeeze-and-excitation) -> BN. Shortcut:
    1x1 stride-2 conv -> BN. The two paths are summed and passed through
    a final ReLU.

    Args:
        input_tensor: input Keras tensor.
        filters: triple of filter counts for the three main-path convs.
        stage, block: identifiers used only for layer naming.
        se_enabled: if True, apply `squeeze_excitation_block` to the main path.
        se_ratio: reduction ratio for the SE block.

    Returns:
        Output tensor of the block.
    """
    f1, f2, f3 = filters

    # BatchNorm normalizes over the channel axis, whose position depends
    # on the backend's image data format.
    bn_axis = -1 if K.image_data_format() == 'channels_last' else 1

    conv_base = 'res' + str(stage) + '_' + str(block) + '_branch'
    bn_base = 'bn' + str(stage) + '_' + str(block) + '_branch'

    # Main path: 1x1 reduce (stride 2 downsamples) -> 3x3 -> 1x1 expand.
    out = Conv2D(f1, (1, 1), strides=(2, 2), kernel_initializer='he_normal',
                 name=conv_base + '2a')(input_tensor)
    out = BatchNormalization(axis=bn_axis, name=bn_base + '2a')(out)
    out = Activation('relu')(out)

    out = Conv2D(f2, (3, 3), padding='same', kernel_initializer='he_normal',
                 name=conv_base + '2b')(out)
    out = BatchNormalization(axis=bn_axis, name=bn_base + '2b')(out)
    out = Activation('relu')(out)

    out = Conv2D(f3, (1, 1), kernel_initializer='he_normal',
                 name=conv_base + '2c')(out)

    # Optional squeeze-and-excitation recalibration of the main path.
    if se_enabled:
        out = squeeze_excitation_block(out, ratio=se_ratio)

    out = BatchNormalization(axis=bn_axis, name=bn_base + '2c')(out)

    # Projection shortcut: match both the downsampled spatial size and the
    # expanded channel count of the main path.
    shortcut = Conv2D(f3, (1, 1), strides=(2, 2),
                      kernel_initializer='he_normal',
                      name=conv_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_base + '1')(shortcut)

    out = Add()([out, shortcut])
    return Activation('relu')(out)
def resnet_layer(inputs, num_filters=16, kernel_size=3, strides=1, activation='relu', batch_normalization=True, conv_first=True):
    """2D conv / batch-norm / activation stack for ResNet construction.

    When `conv_first` is True the order is conv -> BN -> activation;
    otherwise BN -> activation -> conv (pre-activation style). BN and the
    activation are each optional.

    Args:
        inputs: input Keras tensor.
        num_filters: number of conv filters.
        kernel_size: conv kernel size.
        strides: conv stride.
        activation: activation name, or None to skip.
        batch_normalization: whether to apply BatchNormalization.
        conv_first: conv before (True) or after (False) BN/activation.

    Returns:
        Output tensor of the stack.
    """
    conv = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=l2(1e-4))

    def _bn_act(tensor):
        # Shared BN + activation sub-stack; both stages are optional.
        if batch_normalization:
            tensor = BatchNormalization()(tensor)
        if activation is not None:
            tensor = Activation(activation)(tensor)
        return tensor

    if conv_first:
        return _bn_act(conv(inputs))
    return conv(_bn_act(inputs))
def init_model():
    """Build and compile a small CNN for 28x28x1 (MNIST-style) inputs.

    Two conv layers, max-pooling and dropout, then a 512-unit dense layer
    and a 10-way softmax head. Compiled with sparse categorical
    cross-entropy and Adadelta. Prints each layer's output shape.

    Returns:
        The compiled keras Sequential model.
    """
    #K.clear_session()
    # Start from a clean TF1-style default graph.
    tf.reset_default_graph()

    model = Sequential([
        Conv2D(16, (3, 3), input_shape=(28, 28, 1), padding="SAME",
               activation="relu"),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPool2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.5),
        Dense(10, activation='softmax'),
    ])

    # Log the shape flowing out of every layer.
    for lyr in model.layers:
        print(lyr.output_shape)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=keras.optimizers.Adadelta(lr=0.1),
                  metrics=['accuracy'])
    return model
def discriminator_network(self):
    """Build the GAN discriminator CNN and return it as a Keras Model.

    Three 5x5 conv stages (C1-C3) with BatchNorm + LeakyReLU (dropout on
    C2/C3), flattened into a single-unit validity head.

    Returns:
        keras.Model mapping an image of shape `self.img_shape` to a
        scalar validity score.
    """
    # input
    in_image = Input(shape=self.img_shape)

    # C1 layer
    nett = Conv2D(64, (5, 5))(in_image)
    nett = BatchNormalization()(nett)
    nett = LeakyReLU(alpha=0.2)(nett)

    # C2 layer
    nett = Conv2D(128, (5, 5))(nett)
    nett = BatchNormalization()(nett)
    nett = LeakyReLU(alpha=0.2)(nett)
    nett = Dropout(0.2)(nett)

    # C3 layer
    nett = Conv2D(256, (5, 5))(nett)
    nett = BatchNormalization()(nett)
    nett = LeakyReLU(alpha=0.2)(nett)
    nett = Dropout(0.2)(nett)

    # F4 layer. Bug fix vs. original: `Dense(1, alpha=0.2)` passed
    # LeakyReLU's `alpha` kwarg to Dense, which raises a TypeError.
    # A sigmoid head is the conventional GAN validity output —
    # TODO confirm against the training loss used elsewhere.
    nett = Flatten()(nett)
    validity = Dense(1, activation='sigmoid')(nett)

    # output
    model = Model(inputs=in_image, outputs=validity)
    return model
# Build and summarize a small strided-conv downsampling stack on a
# 3072x3072x3 input.
#
# Bug fix vs. original: `import tf as tf` / `from tf.keras ...` referenced
# a nonexistent `tf` package — the package name is `tensorflow`.
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D
from tensorflow.keras import Model

# `inputs` instead of `input` to avoid shadowing the builtin.
inputs = Input((3072, 3072, 3))

# Three 11x11 convs: the first downsamples by 3, the next two by 2 each.
x = Conv2D(filters=32, kernel_size=(11, 11), strides=(3, 3), padding="same")(inputs)
x = Conv2D(filters=32, kernel_size=(11, 11), strides=(2, 2), padding="same")(x)
x = Conv2D(filters=32, kernel_size=(11, 11), strides=(2, 2), padding="same")(x)

model = Model(inputs=inputs, outputs=x)
model.summary()