def cnn_seq(num_filters=16, kernel_size=3, strides=1, activation=ReLU,
            batch_normalization=True, conv_first=True):
    """Build the main (conv) branch of a residual block as a Sequential.

    Layer order: Conv2D -> [BN] -> activation -> Conv2D -> [BN].
    The first conv may be strided (for downsampling); the second always
    uses stride 1.  NOTE(review): `conv_first` is accepted but unused here —
    kept for interface compatibility with `add_resnet_layer`.
    """
    branch = Sequential()
    # Downsampling (if any) happens in this first convolution.
    branch.add(Conv2D(num_filters,
                      kernel_size=kernel_size,
                      strides=strides,
                      padding='same',
                      kernel_regularizer=l2(5e-3)))
    if batch_normalization:
        branch.add(BatchNormalization_v2())
    branch.add(activation())
    # Second convolution preserves the spatial size.
    branch.add(Conv2D(num_filters,
                      kernel_size=kernel_size,
                      strides=1,
                      padding='same',
                      kernel_regularizer=l2(5e-3)))
    if batch_normalization:
        branch.add(BatchNormalization_v2())
    return branch
def make_model():
    """Build a plain (non-residual) classifier: 5 conv+ReLU stages + MLP head.

    Uses the module-level `input_shape`; returns the compiled model.
    """
    net = Sequential()
    net.add(Input(shape=input_shape))
    # Five identical 3x3 conv + ReLU stages — no pooling, no normalization.
    for _ in range(5):
        net.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
        net.add(ReLU())
    net.add(Flatten())
    # Wide fully-connected head with 'He' initialization, as elsewhere in
    # this file.
    for units in (2500, 1500):
        net.add(Dense(units, kernel_initializer='He'))
        net.add(ReLU())
    net.add(Dense(10, kernel_initializer='He'))
    net.add(Softmax())
    net.summary()
    net.compile(Adam(), 'categorical_crossentropy', 'accuracy')
    return net
def make_model():
    """Build a VGG-style classifier: two conv/BN/pool stages + dense head.

    Uses the module-level `input_shape`; returns the compiled model.
    """
    net = Sequential()
    net.add(Input(shape=input_shape))
    # Two stages of (conv -> BN -> ReLU) x 2, each closed by 2x2 max pooling;
    # the second stage doubles the filter count.
    for filters in (32, 64):
        for _ in range(2):
            net.add(Conv2D(filters, kernel_size=(3, 3), padding='same'))
            net.add(BN_LAYER())
            net.add(ReLU())
        net.add(MaxPooling2D(2, 2, stride=2))
    net.add(Flatten())
    net.add(Dense(512, kernel_initializer='He'))
    net.add(BN_LAYER())
    net.add(ReLU())
    net.add(Dense(10, kernel_initializer='He'))
    net.add(Softmax())
    net.summary()
    net.compile(Adam(), loss='categorical_crossentropy', metric='accuracy')
    return net
def make_model():
    """Build a ResNet-20-style classifier: stem + 3 stages of 3 residual blocks.

    Uses the module-level `input_shape`; returns the compiled model.
    """
    net = Sequential()
    net.add(Input(shape=input_shape))
    # Stem: single conv -> BN -> ReLU before the residual stages.
    net.add(Conv2D(16,
                   kernel_size=3,
                   strides=1,
                   padding='same',
                   kernel_regularizer=l2(1e-4)))
    net.add(BatchNormalization_v2())
    net.add(ReLU())
    # Stage 1: 16 filters, identity shortcuts throughout.
    for _ in range(3):
        add_residual_block(net, num_filters=16)
    # Stages 2 and 3: the first block of each halves the spatial size and
    # widens the channels, so it needs a 1x1 conv shortcut; the remaining
    # two blocks use identity shortcuts.
    for filters in (32, 64):
        add_residual_block(net, num_filters=filters, strides=2, cnn_shortcut=True)
        add_residual_block(net, num_filters=filters)
        add_residual_block(net, num_filters=filters)
    net.add(AveragePooling2DAll())
    net.add(Flatten())
    net.add(Dense(10, kernel_initializer='He'))
    net.add(Softmax())
    net.summary()
    net.compile(Adam(lr=0.001, decay=1e-4), 'categorical_crossentropy', 'accuracy')
    return net
def add_residual_block(model, num_filters=16, kernel_size=3, strides=1,
                       activation=ReLU, cnn_shortcut=False,
                       batch_normalization=True, conv_first=True):
    """Append one residual block to `model` and return it.

    The block forks the input into a shortcut branch and a conv branch,
    sums them element-wise, and applies a final ReLU.
    """
    # Shortcut branch: a 1x1 projection conv when the shape changes
    # (downsampling / channel widening), otherwise a pass-through.
    if cnn_shortcut:
        shortcut = Conv2D(num_filters,
                          kernel_size=1,
                          strides=strides,
                          padding='same',
                          kernel_regularizer=l2(5e-3))
    else:
        shortcut = Same()
    # Main branch: conv -> [BN] -> act -> conv -> [BN].
    main_branch = cnn_seq(num_filters, kernel_size, strides, activation,
                          batch_normalization, conv_first)
    model.add(Separate())              # fork input into two parallel paths
    model.add([shortcut, main_branch])
    model.add(Add())                   # merge by element-wise sum
    model.add(ReLU())
    return model
def make_my_yolo(input_shape, num_anchors, num_classes):
    """Build a tiny YOLO-style single-scale detector.

    Args:
        input_shape: channels-first (C, W, H) input shape.
        num_anchors: anchor boxes predicted per grid cell.
        num_classes: number of object classes.

    Returns:
        The model; its output is (batch, anchor, grid_w, grid_h,
        4 + 1 + num_classes) with sigmoid applied to every channel
        except the raw w/h offsets (indices 2 and 3).
    """
    ori_c, ori_w, ori_h = input_shape
    model = Sequential()
    model.add(Input(shape=input_shape))
    # Backbone: four conv/BN/LeakyReLU stages, each followed by 2x2 max
    # pooling — overall spatial downsampling factor of 16.
    for filters in (16, 32, 64, 128):
        add_conv2d_bn_leaky(model, filters, 3)
        model.add(MaxPooling2D(2, 2))
    # Detection head: 1x1 conv predicting (x, y, w, h, objectness, classes)
    # for each anchor at every grid cell.
    model.add(Conv2D(num_anchors * (4 + 1 + num_classes), 1, 1))
    model.add(
        Reshape(
            (num_anchors, (4 + 1 + num_classes), ori_w // 16, ori_h // 16)))
    # Move the prediction channel last: (batch, anchor, gw, gh, channels).
    model.add(Transpose((0, 1, 3, 4, 2)))
    # Fix: derive the sigmoid mask's shape from the arguments instead of the
    # old hard-coded (3, 7, 7, 7), which only matched num_anchors=3, a 7x7
    # grid (112x112 input) and 2 classes.  Channels 2 and 3 (w, h) are left
    # un-squashed; everything else goes through a sigmoid.
    mask = np.ones(
        (num_anchors, ori_w // 16, ori_h // 16, 4 + 1 + num_classes),
        dtype=bool)
    mask[..., 2] = False
    mask[..., 3] = False
    model.add(MaskedSigmoid(mask))
    return model
def add_conv2d_bn_leaky(model, num_filters=16, kernel_size=3, strides=1):
    """Append a Conv2D + BatchNorm + LeakyReLU(0.1) triple to `model`."""
    conv = Conv2D(num_filters,
                  kernel_size,
                  strides,
                  padding='same',
                  use_bias=False)
    model.add(conv)
    # Batch norm provides the shift term, so the conv runs bias-free.
    model.add(BatchNormalization_v2())
    model.add(LeakyReLU(0.1))
def add_resnet_layer(model, num_filters=16, kernel_size=3, strides=1,
                     activation='relu', batch_normalization=True,
                     conv_first=True):
    """Append conv/[BN]/[activation] to `model` in either ordering.

    conv_first=True gives post-activation order (conv -> BN -> act);
    conv_first=False gives pre-activation order (BN -> act -> conv).
    Uses the module-level `l2_lambda` for weight regularization.
    """
    conv = Conv2D(num_filters,
                  kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_regularizer=l2(l2_lambda))

    def _add_bn_act():
        # Optional BN then optional activation — shared by both orderings.
        if batch_normalization:
            model.add(BatchNormalization_v2())
        if activation is not None:
            model.add(Activation(activation))

    if conv_first:
        model.add(conv)
        _add_bn_act()
    else:
        _add_bn_act()
        model.add(conv)
    return model
def make_darknet19(input_shape):
    """Build a Darknet-19 backbone topped with a small detection conv.

    Returns the (uncompiled) model.
    """
    # Each stage is a list of (filters, kernel_size) conv specs; every
    # stage except the last is followed by 2x2 max pooling.
    stages = [
        [(32, 3)],
        [(64, 3)],
        [(128, 3), (64, 1), (128, 3)],
        [(256, 3), (128, 1), (256, 3)],
        [(512, 3), (256, 1), (512, 3), (256, 1), (512, 3)],
        [(1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3), (1000, 1)],
    ]
    model = Sequential()
    model.add(Input(shape=input_shape))
    last = len(stages) - 1
    for idx, convs in enumerate(stages):
        for filters, ksize in convs:
            add_conv2d_bn_leaky(model, filters, ksize)
        if idx != last:
            model.add(MaxPooling2D(2, 2))
    # Detection head: 5 anchors x (4 box coords + 1 objectness + 1 class).
    model.add(Conv2D(5 * (4 + 1 + 1), 1, 1))
    return model