def cnn_seq(num_filters=16, kernel_size=3, strides=1, activation=ReLU,
            batch_normalization=True, conv_first=True):
    # Builds a two-convolution sub-sequence: conv -> (BN) -> activation -> conv -> (BN).
    # Both convolutions are 'same'-padded and L2-regularized; only the first one
    # uses the requested strides. Note: conv_first is accepted but not used here.
    seq = Sequential()
    seq.add(Conv2D(num_filters, kernel_size=kernel_size, strides=strides,
                   padding='same', kernel_regularizer=l2(5e-3)))
    if batch_normalization:
        seq.add(BatchNormalization_v2())
    seq.add(activation())
    seq.add(Conv2D(num_filters, kernel_size=kernel_size, strides=1,
                   padding='same', kernel_regularizer=l2(5e-3)))
    if batch_normalization:
        seq.add(BatchNormalization_v2())
    return seq
def make_gan_model(generator, discriminator, optimizer):
    # Freeze the discriminator so that only the generator is updated when the
    # combined generator->discriminator model is trained.
    discriminator.trainable = False
    gan_model = Sequential()
    gan_model.add(generator)
    gan_model.add(discriminator)
    gan_model.compile(loss='binary_crossentropy', optimizer=optimizer, metric=None)
    return gan_model
def make_discriminator(optimizer):
    discriminator = Sequential()
    discriminator.add(Dense(2048, input_shape=image_shape))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1024))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(512))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(256))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Dense(1))
    discriminator.add(Sigmoid())
    discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metric=None)
    return discriminator
def make_generator(optimizer):
    generator = Sequential()
    generator.add(Dense(256, input_shape=randomDim))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(512))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(1024))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(2048))
    generator.add(LeakyReLU(0.2))
    generator.add(Dense(image_shape))
    generator.add(Sigmoid())
    return generator
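# Usage sketch (not from the original source): wiring the three GAN factories above
# together and one alternating training step. This assumes the framework mirrors the
# Keras-style predict/train_on_batch API and relies on the module-level globals already
# used in this file (np, randomDim, Adam); the learning rate is an assumed value.
def train_gan_step(generator, discriminator, gan_model, real_images, batch_size):
    # Sample noise and let the generator produce a batch of fakes.
    noise = np.random.normal(0, 1, size=(batch_size, randomDim))
    fake_images = generator.predict(noise)
    # Train the discriminator on real images (label 1) and fakes (label 0).
    discriminator.trainable = True
    d_loss_real = discriminator.train_on_batch(real_images, np.ones((batch_size, 1)))
    d_loss_fake = discriminator.train_on_batch(fake_images, np.zeros((batch_size, 1)))
    # Train the generator through the frozen discriminator: fakes labeled as real.
    discriminator.trainable = False
    g_loss = gan_model.train_on_batch(noise, np.ones((batch_size, 1)))
    return d_loss_real, d_loss_fake, g_loss


# optimizer = Adam(lr=0.0002)  # assumed hyperparameter
# generator = make_generator(optimizer)
# discriminator = make_discriminator(optimizer)
# gan_model = make_gan_model(generator, discriminator, optimizer)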
def make_model():
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Dense(4096))
    model.add(LeakyReLU(0.2))
    model.add(Dense(2048))
    model.add(LeakyReLU(0.2))
    model.add(Dense(1024))
    model.add(LeakyReLU(0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(0.2))
    model.add(Dense(10))
    model.add(Softmax())
    model.summary()
    model.compile(Momentum(), 'categorical_crossentropy', 'accuracy')
    return model
def make_my_yolo(input_shape, num_anchors, num_classes):
    # Channels-first input; five 2x2 max-poolings shrink the spatial size by 32.
    ori_c, ori_w, ori_h = input_shape
    model = Sequential()
    model.add(Input(shape=input_shape))
    add_conv2d_bn_leaky(model, 32, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 32, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 64, 3)
    add_conv2d_bn_leaky(model, 64, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 64, 3)
    add_conv2d_bn_leaky(model, 64, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 128, 3)
    add_conv2d_bn_leaky(model, 128, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 128, 3)
    add_conv2d_bn_leaky(model, 128, 3)
    add_conv2d_bn_leaky(model, 128, 3)
    # Detection head: one 1x1 convolution producing
    # num_anchors * (4 box coords + 1 objectness + num_classes) channels per grid cell.
    model.add(Conv2D(num_anchors * (4 + 1 + num_classes), 1, 1))
    model.add(Reshape((num_anchors, (4 + 1 + num_classes), ori_w // 32, ori_h // 32)))
    model.add(Transpose((0, 1, 3, 4, 2)))
    # Apply a sigmoid to every output except the box width/height (indices 2 and 3 of
    # the last axis), which are left raw. The mask shape matches the transposed output:
    # (num_anchors, ori_w // 32, ori_h // 32, 4 + 1 + num_classes).
    mask = np.ones((num_anchors, ori_w // 32, ori_h // 32, 4 + 1 + num_classes), dtype=bool)
    mask[..., 2] = False
    mask[..., 3] = False
    model.add(MaskedSigmoid(mask))
    return model
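# add_conv2d_bn_leaky is called by make_my_yolo above and make_darknet19 below but is
# not shown in this section. A minimal sketch of what it might look like, assuming it
# appends a 'same'-padded Conv2D followed by the BatchNormalization_v2 and LeakyReLU
# layers used elsewhere in this file; the 0.1 slope is the usual Darknet choice and is
# an assumption here, not the original implementation.
def add_conv2d_bn_leaky(model, num_filters, kernel_size):
    # Convolution -> batch norm -> leaky ReLU, the standard Darknet building block.
    model.add(Conv2D(num_filters, kernel_size=kernel_size, strides=1, padding='same'))
    model.add(BatchNormalization_v2())
    model.add(LeakyReLU(0.1))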
def make_model():
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
    model.add(ReLU())
    model.add(Dropout(0.3))
    model.add(Conv2D(32, kernel_size=(3, 3), padding='same'))
    model.add(ReLU())
    model.add(MaxPooling2D(2, 2))
    model.add(Flatten())
    model.add(Dropout(0.3))
    model.add(Dense(512, kernel_initializer='He'))
    model.add(ReLU())
    model.add(Dropout(0.3))
    model.add(Dense(10, kernel_initializer='He'))
    model.add(Softmax())
    model.summary()
    model.compile(Adam(), 'categorical_crossentropy', 'accuracy')
    return model
def make_model():
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Conv2D(16, kernel_size=3, strides=1, padding='same',
                     kernel_regularizer=l2(1e-4)))
    model.add(BatchNormalization_v2())
    model.add(ReLU())
    add_residual_block(model, num_filters=16)
    add_residual_block(model, num_filters=16)
    add_residual_block(model, num_filters=16)
    add_residual_block(model, num_filters=32, strides=2, cnn_shortcut=True)
    add_residual_block(model, num_filters=32)
    add_residual_block(model, num_filters=32)
    add_residual_block(model, num_filters=64, strides=2, cnn_shortcut=True)
    add_residual_block(model, num_filters=64)
    add_residual_block(model, num_filters=64)
    model.add(AveragePooling2DAll())
    model.add(Flatten())
    model.add(Dense(10, kernel_initializer='He'))
    model.add(Softmax())
    model.summary()
    model.compile(Adam(lr=0.001, decay=1e-4), 'categorical_crossentropy', 'accuracy')
    return model
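# add_residual_block is used above but defined elsewhere. A minimal sketch of one
# plausible implementation, reusing cnn_seq from the top of this section and assuming
# the Separate()/[shortcut, main]/Add() branching idiom from resnet_v2 below; the
# cnn_shortcut flag is taken to mean "project the shortcut with a strided 1x1
# convolution". This is an illustration, not the original helper.
def add_residual_block(model, num_filters, strides=1, cnn_shortcut=False):
    # Main branch: conv -> BN -> ReLU -> conv -> BN, as built by cnn_seq.
    main_route = cnn_seq(num_filters=num_filters, strides=strides)
    if cnn_shortcut:
        # Projection shortcut to match the new width/stride.
        short_cut = Sequential()
        short_cut.add(Conv2D(num_filters, kernel_size=1, strides=strides,
                             padding='same', kernel_regularizer=l2(1e-4)))
    else:
        # Identity shortcut.
        short_cut = Same()
    model.add(Separate())
    model.add([short_cut, main_route])
    model.add(Add())
    model.add(ReLU())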
def make_darknet19(input_shape):
    model = Sequential()
    model.add(Input(shape=input_shape))
    add_conv2d_bn_leaky(model, 32, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 64, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 128, 3)
    add_conv2d_bn_leaky(model, 64, 1)
    add_conv2d_bn_leaky(model, 128, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 256, 3)
    add_conv2d_bn_leaky(model, 128, 1)
    add_conv2d_bn_leaky(model, 256, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 512, 3)
    add_conv2d_bn_leaky(model, 256, 1)
    add_conv2d_bn_leaky(model, 512, 3)
    add_conv2d_bn_leaky(model, 256, 1)
    add_conv2d_bn_leaky(model, 512, 3)
    model.add(MaxPooling2D(2, 2))
    add_conv2d_bn_leaky(model, 1024, 3)
    add_conv2d_bn_leaky(model, 512, 1)
    add_conv2d_bn_leaky(model, 1024, 3)
    add_conv2d_bn_leaky(model, 512, 1)
    add_conv2d_bn_leaky(model, 1024, 3)
    add_conv2d_bn_leaky(model, 1000, 1)
    model.add(Conv2D(5 * (4 + 1 + 1), 1, 1))
    return model
def make_model():
    model = Sequential()
    model.add(Input(shape=input_shape))
    model.add(Dense(4096))
    model.add(ReLU())
    model.add(Dense(4096))
    model.add(ReLU())
    model.add(Dense(4096))
    model.add(ReLU())
    model.add(Dense(4096))
    model.add(ReLU())
    model.add(Dense(4096))
    model.add(ReLU())
    model.add(Dense(10))
    model.add(Softmax())
    model.summary()
    model.compile(Adam(), 'categorical_crossentropy', 'accuracy')
    return model
def resnet_v2(input_shape, depth, num_classes):
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (e.g. 56 or 110)')
    num_filters_in = 16
    num_res_blocks = int((depth - 2) / 9)
    model = Sequential()
    model.add(Input(shape=input_shape))
    add_resnet_layer(model, num_filters=num_filters_in, conv_first=True)
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                num_filters_out = num_filters_in * 4
                if res_block == 0:
                    # First block of the first stage: no pre-activation.
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                if res_block == 0:
                    # First block of a later stage downsamples.
                    strides = 2
            # Pre-activation bottleneck main branch: 1x1 -> 3x3 -> 1x1.
            main_route = Sequential()
            add_resnet_layer(main_route, num_filters=num_filters_in, kernel_size=1,
                             strides=strides, activation=activation,
                             batch_normalization=batch_normalization, conv_first=False)
            add_resnet_layer(main_route, num_filters=num_filters_in, conv_first=False)
            add_resnet_layer(main_route, num_filters=num_filters_out, kernel_size=1,
                             conv_first=False)
            if res_block == 0:
                # Projection shortcut to match the changed width/stride.
                short_cut = Sequential()
                add_resnet_layer(short_cut, num_filters=num_filters_out, kernel_size=1,
                                 strides=strides, activation=None,
                                 batch_normalization=False)
            else:
                # Identity shortcut.
                short_cut = Same()
            model.add(Separate())
            model.add([short_cut, main_route])
            model.add(Add())
        num_filters_in = num_filters_out
    model.add(BatchNormalization_v2())
    model.add(Activation('relu'))
    model.add(AveragePooling2DAll())
    model.add(Flatten())
    model.add(Dense(num_classes, kernel_initializer='He'))
    model.add(Softmax())
    return model
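# add_resnet_layer is not defined in this section. A minimal sketch of one plausible
# implementation, modeled on cnn_seq above but adding a single layer and honoring
# conv_first (conv -> BN -> activation when True, BN -> activation -> conv when False,
# as in the pre-activation ResNet v2); treating any non-None activation as ReLU is an
# assumption, not the original code.
def add_resnet_layer(model, num_filters=16, kernel_size=3, strides=1,
                     activation='relu', batch_normalization=True, conv_first=True):
    conv = Conv2D(num_filters, kernel_size=kernel_size, strides=strides,
                  padding='same', kernel_regularizer=l2(1e-4))
    if conv_first:
        model.add(conv)
        if batch_normalization:
            model.add(BatchNormalization_v2())
        if activation is not None:
            model.add(ReLU())
    else:
        if batch_normalization:
            model.add(BatchNormalization_v2())
        if activation is not None:
            model.add(ReLU())
        model.add(conv)


# Example call, assuming CIFAR-10-sized inputs:
# model = resnet_v2(input_shape=(32, 32, 3), depth=56, num_classes=10)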