def __init__(self, filter_num1, filter_num2, filter_num3):
    super(FeatureExtruder_Depth3, self).__init__()
    self.conv1 = Conv2D(filters=filter_num1, kernel_size=5, strides=1,
                        padding='same', name='conv1')
    self.conv1_avgpool = AvgPool2D(pool_size=2, strides=2, padding='valid',
                                   name='conv1_avgpool')
    self.conv1_act = Activation(activation='tanh')
    self.conv2 = Conv2D(filters=filter_num2, kernel_size=5, strides=1,
                        padding='same', name='conv2')
    self.conv2_avgpool = AvgPool2D(pool_size=2, strides=2, padding='valid',
                                   name='conv2_avgpool')
    self.conv2_act = Activation(activation='tanh')
    self.conv3 = Conv2D(filters=filter_num3, kernel_size=5, strides=1,
                        padding='same', name='conv3')
    self.conv3_act = Activation(activation='tanh')
def createModel():
    model = Sequential([
        Conv2D(32, 3, activation='relu',
               input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(pool_size=(2, 2), padding="same"),
        Conv2D(32, 3, activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(64, 3, activation='relu'),
        Conv2D(250, 3, activation='relu'),
        Conv2D(32, 3, activation='relu'),
        AvgPool2D(pool_size=(2, 2)),
        Conv2D(32, 3, activation='relu'),
        AvgPool2D(pool_size=(2, 2)),
        Conv2D(32, 3, activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.2),
        Dense(2)
    ])
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
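A minimal usage sketch for createModel above. IMG_HEIGHT and IMG_WIDTH are module-level constants the snippet assumes are defined elsewhere, so the values here are placeholders:

IMG_HEIGHT, IMG_WIDTH = 150, 150  # placeholder values; the originals are not shown
model = createModel()
model.summary()
# with from_logits=True and Dense(2), labels should be one-hot vectors of shape (batch, 2)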
def create_model(input_shape):
    optimizer = Adam(lr=0.001)
    model = Sequential()
    model.add(
        Conv2D(6, (5, 5), padding='same', activation='relu',
               input_shape=input_shape))
    model.add(AvgPool2D(pool_size=(2, 2)))
    model.add(Conv2D(16, (5, 5), padding='valid', activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(120, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])
    return model
def create_model(input_shape):
    model = Sequential()
    # first hidden layer
    model.add(
        Conv2D(
            filters=4,
            kernel_size=(3, 3),
            padding='same',
            activation="relu",
            input_shape=(28, 28, 1),
        ))
    model.add(AvgPool2D(pool_size=(2, 2)))
    # second layer
    model.add(Conv2D(16, (5, 5), padding='valid', activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    # two fully connected layers with 100 neurons each
    model.add(Dense(100, activation='relu'))
    model.add(Dense(100, activation='relu'))
    model.add(Dense(units=10, activation='softmax'))
    # configure optimizer hyperparameters
    optimizer = Adam(lr=0.01)
    model.compile(
        optimizer=optimizer,
        loss='sparse_categorical_crossentropy',
        metrics=['acc'],
    )
    model.summary()
    return model
def __init__(self, random_seed, serial, seed_serial):
    # Settings for Leaky ReLU
    self.leaky_relu_alpha = 0.1
    # Ordinary settings
    self.author = "Andreas"
    self.name = self.__class__.__name__
    self.description = "Leaky ReLU with alpha: " + str(self.leaky_relu_alpha)
    self.serial = serial
    self.ss = seed_serial
    self.seed = random_seed
    # Setting up seeds for repeatability
    # More info on https://github.com/NVIDIA/tensorflow-determinism
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    rn.seed(random_seed)
    np.random.seed(random_seed)
    tf.random.set_seed(random_seed)
    self.timestamp = self.get_timestamp()
    self.epochs = 50  # prediction [15 = 89.45%, 20 = 89.67%, 25 = 91.36%, 30 = 91.36%, 35 = 91.36%]
    self.batch_size = 32
    self.verbose = 1
    self.model = Sequential()
    self.model.add(
        Conv2D(filters=6,
               kernel_size=(3, 3),
               activation=tf.keras.layers.LeakyReLU(alpha=self.leaky_relu_alpha),
               input_shape=(32, 32, 3)))
    self.model.add(AvgPool2D())
    self.model.add(
        Conv2D(filters=16,
               kernel_size=(3, 3),
               activation=tf.keras.layers.LeakyReLU(alpha=self.leaky_relu_alpha)))
    self.model.add(AvgPool2D())
    self.model.add(Flatten())
    self.model.add(
        Dense(units=120,
              activation=tf.keras.layers.LeakyReLU(alpha=self.leaky_relu_alpha)))
    self.model.add(
        Dense(units=84,
              activation=tf.keras.layers.LeakyReLU(alpha=self.leaky_relu_alpha)))
    self.model.add(Dense(units=43, activation='softmax'))
    optimizer = tf.keras.optimizers.SGD(lr=0.01, momentum=0.9)
    self.model.compile(optimizer=optimizer,
                       loss='sparse_categorical_crossentropy',
                       metrics=["accuracy"])
def __init__(self):
    super(NoiseCancelNet, self).__init__()
    self.encode1 = Conv2D(filters=128, kernel_size=7, strides=1,
                          padding='same', name='Conv_1', activation='relu')
    self.encode1_avgpool = AvgPool2D(pool_size=2, strides=2, padding='valid',
                                     name='AvgPool_1')
    self.encode2 = Conv2D(filters=256, kernel_size=7, strides=1,
                          padding='same', name='Conv_2', activation='relu')
    self.encode2_avgpool = AvgPool2D(pool_size=2, strides=2, padding='valid',
                                     name='AvgPool_2')
    self.inception1 = InceptionBlock_3a()
    self.inception2 = InceptionBlock_3b()
    self.inception3 = InceptionBlock_4a()
    self.inception4 = InceptionBlock_4b()
    self.decode1 = Conv2DTranspose(filters=128, kernel_size=7, strides=2,
                                   padding='same', name='ConvT_2',
                                   activation='relu')
    self.decode2 = Conv2DTranspose(filters=3, kernel_size=7, strides=2,
                                   padding='same', name='ConvT_1',
                                   activation='sigmoid')
def create_model(input_shape):
    model = Sequential()
    model.add(Conv2D(input_shape=input_shape, filters=6, padding='same',
                     kernel_size=(5, 5), activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2), strides=None))
    model.add(Conv2D(kernel_size=(5, 5), filters=12, padding='valid',
                     activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2), strides=None))
    model.add(Conv2D(kernel_size=(5, 5), filters=24, padding='valid',
                     activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2), strides=None))
    model.add(Flatten())
    model.add(Dense(units=64, activation='relu'))
    model.add(Dense(units=64, activation='relu'))
    model.add(Dense(units=12, activation='softmax'))
    # `optimizer` was never defined in this snippet; Adam is an assumed stand-in
    optimizer = Adam(lr=0.001)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])
    return model
def __init__(self):
    super(self_ResNet101, self).__init__()
    self.conv1 = Sequential([
        Conv2D(filters=64, kernel_size=[7, 7], strides=2, padding='same'),
        BatchNormalization(),
        ReLU(),
        MaxPool2D(pool_size=[3, 3], strides=2)
    ])
    self.layerN = []
    # layer1
    for i in range(3):
        self.layerN.append(BasicBlock(32, 64, 1))
    # layer2
    for i in range(4):
        self.layerN.append(BasicBlock(64, 128, 1))
    # layer3
    for i in range(23):
        self.layerN.append(BasicBlock(128, 256, 1))
    # layer4
    for i in range(3):
        self.layerN.append(BasicBlock(256, 512, 1))
    self.layerN = Sequential(self.layerN)
    self.Avg = AvgPool2D(pool_size=[7, 7], strides=1)
    self.flatten = Flatten()
    self.fc = Dense(units=3)
def build(self, input_shape):
    ## downsampling
    if self.downsampling:
        if self.mode == 'max':
            self.pool = MaxPool2D(self.size, padding='same')
        elif self.mode == 'avg':
            self.pool = AvgPool2D(self.size, padding='same')
        elif self.mode == 'global':
            self.pool = GlobalAvgPool2D()
        else:
            raise NotImplementedError(f'No downsampling mode={self.mode}')
    ## upsampling
    else:
        if self.mode == 'pad':
            if not isinstance(self.size, (tuple, list)):
                self.size = [self.size]
            if len(self.size) == 1:
                self.size = list(self.size) * 2
            # this doesn't take into account odd number
            self.pool = ZeroPadding2D(padding=[
                (i - 1) * s // 2
                for i, s in zip(self.size, input_shape[1:])
            ])
        else:
            self.pool = UpSampling2D(size=self.size, interpolation=self.mode)
    self.reshape = Reshape((1, 1, input_shape[-1]))
    return super().build(input_shape)
def unit(x, groups, channels, strides):
    y = x
    x = Conv2D(channels // 4, kernel_size=1, strides=1, padding="same",
               groups=groups)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = channel_shuffle(x, groups)
    x = DepthwiseConv2D(kernel_size=3, strides=strides, padding="same")(x)
    x = BatchNormalization()(x)
    if strides == 2:
        channels = channels - y.shape[-1]
    x = Conv2D(channels, kernel_size=1, strides=1, padding="same",
               groups=groups)(x)
    if strides == 1:
        x = Add()([x, y])
    elif strides == 2:
        y = AvgPool2D(pool_size=3, strides=2, padding="same")(y)
        x = Concatenate()([x, y])
    x = Activation('relu')(x)
    return x
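The unit above depends on a channel_shuffle helper that is not defined in this snippet. A minimal sketch of the standard ShuffleNet channel shuffle, assuming channels-last tensors whose channel count is divisible by groups:

import tensorflow as tf

def channel_shuffle(x, groups):
    # split channels into `groups` groups, swap the group and per-group axes,
    # then flatten back: this mixes information across the grouped convolutions
    _, h, w, c = x.shape
    x = tf.reshape(x, [-1, h, w, groups, c // groups])
    x = tf.transpose(x, [0, 1, 2, 4, 3])
    return tf.reshape(x, [-1, h, w, c])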
def __init__(self):
    super(DenseNet, self).__init__()
    # convolution layer
    self.Conv = Conv2D(filters=32, kernel_size=(5, 5), strides=2)
    self.Pool = MaxPool2D(pool_size=(2, 2), strides=2)
    # DenseBlock 1
    self.layer1 = DenseBlock(6)
    # Transition 1
    self.TransLayer1 = TransLayer()
    # DenseBlock 2
    self.layer2 = DenseBlock(12)
    # Transition 2
    self.TransLayer2 = TransLayer()
    # DenseBlock 3
    self.layer3 = DenseBlock(24)
    # Transition 3
    self.TransLayer3 = TransLayer()
    # DenseBlock 4
    self.layer4 = DenseBlock(16)
    # Transition 4: 7x7 average pooling
    self.TransLayer4 = AvgPool2D(pool_size=(7, 7))
    self.softmax = Dense(3)
def __init__(self, input_shape, group_size, pool_size=8, activation=ReLU,
             k=1, **kwargs):
    super(WideResidualNetwork, self).__init__(input_shape=input_shape,
                                              dynamic=True, **kwargs)
    self.groups = [
        Conv2D(input_shape=input_shape,
               filters=WideResidualNetwork.FILTER_SIZES[0],
               kernel_size=(3, 3),
               strides=WideResidualNetwork.STRIDES[0],
               padding='same')
    ]
    self.groups.extend([
        Group(n=group_size,
              filters=WideResidualNetwork.FILTER_SIZES[i],
              stride=WideResidualNetwork.STRIDES[i],
              activation=activation,
              k=k) for i in range(1, len(WideResidualNetwork.FILTER_SIZES))
    ])
    self.groups.append(AvgPool2D(pool_size=pool_size))
    self.pool_size = pool_size
def Dehaze(img_shape=(256, 256, 3)):
    img_input = Input(img_shape, name='img_input')
    trans = transmission_map_generator(img_shape)(img_input)
    atmos = atmospheric_light_generator(img_shape)(img_input)
    # $trans_{reciprocal} = \frac{1}{trans + 10^{-10}}$
    trans_reciprocal = Lambda(function=lambda x: 1 / (K.abs(x) + 10**-10))(trans)
    atmos = compose(
        AvgPool2D(),
        LeakyReLU(0.2),
        UpSampling2D()
    )(atmos)
    # $dehaze = (input - atmos) \times trans^{-1} + atmos$
    dehaze = Subtract()([img_input, atmos])
    dehaze = Multiply()([dehaze, trans_reciprocal])
    dehaze = Add()([dehaze, atmos])
    dehaze = compose(
        Concatenate(),
        Conv2D(6, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Conv2D(20, kernel_size=3, strides=1, padding='same'),
        LeakyReLU(alpha=0.2),
        Concat_Samping_Block([32, 16, 8, 4], kernel_size=1),
        Conv2D(3, kernel_size=3, strides=1, padding='same'),
        Activation('tanh')
    )([dehaze, img_input])
    return Model(inputs=[img_input], outputs=[dehaze, trans, atmos])
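Dehaze chains layers through a compose helper that is not shown here. A minimal sketch of the usual left-to-right composition utility it appears to rely on:

from functools import reduce

def compose(*funcs):
    # compose(f, g, h)(x) == h(g(f(x))): apply the layers left to right
    if not funcs:
        raise ValueError('compose requires at least one function')
    return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)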
def InceptionI2D(input_shape, num_classes=400, dropout_rate=0.1,
                 name='inception_i3d'):
    video_input = tf.keras.layers.Input(shape=input_shape)
    net = Unit2D(filters=64, kernel_size=[7, 7], strides=[2, 2],
                 name='Conv3d_1a_7x7')(video_input)
    net = MaxPool2D(pool_size=[3, 3], strides=[2, 2], padding='SAME',
                    name='MaxPool3d_2a_3x3')(net)
    net = Unit2D(filters=64, kernel_size=[1, 1], name='Conv3d_2b_1x1')(net)
    net = Unit2D(filters=192, kernel_size=[3, 3], name='Conv3d_2c_3x3')(net)
    net = MaxPool2D(pool_size=[3, 3], strides=[2, 2], padding='SAME',
                    name='MaxPool3d_3a_3x3')(net)
    net = Inc([64, [96, 128], [16, 32], 32], 'Mixed_3b')(net)
    net = Inc([128, [128, 192], [32, 96], 64], 'Mixed_3c')(net)
    net = MaxPool2D(pool_size=[3, 3], strides=[2, 2], padding='SAME',
                    name='MaxPool3d_4a_3x3')(net)
    net = Inc([192, [96, 208], [16, 48], 64], 'Mixed_4b')(net)
    net = Inc([160, [112, 224], [24, 64], 64], 'Mixed_4c')(net)
    net = Inc([128, [128, 256], [24, 64], 64], 'Mixed_4d')(net)
    net = Inc([112, [144, 288], [32, 64], 64], 'Mixed_4e')(net)
    net = Inc([256, [160, 320], [32, 128], 128], 'Mixed_4f')(net)
    net = MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding='SAME',
                    name='MaxPool3d_5a_2x2')(net)
    net = Inc([256, [160, 320], [32, 128], 128], 'Mixed_5b')(net)
    net = Inc([384, [192, 384], [48, 128], 128], 'Mixed_5c')(net)
    with tf.name_scope('Logits'):
        net = AvgPool2D(pool_size=[7, 7], strides=[1, 1], padding='VALID')(net)
        net = Dropout(dropout_rate)(net)
        logits = Unit2D(filters=num_classes, kernel_size=[1, 1],
                        activation=None, use_batch_norm=False, use_bias=True,
                        name='Conv3d_0c_1x1')(net)
        logits = tf.squeeze(logits, [2], name='SpatialSqueeze')
        # logits = AvgPool3D(pool_size=[7,1,1], strides=[1,1,1], padding='VALID')(logits)
        # logits = tf.keras.layers.Flatten()(logits)
        # logits = tf.keras.layers.Reshape([num_classes])(logits)
        averaged_logits = tf.reduce_mean(logits, axis=1)
    prediction = tf.keras.activations.softmax(averaged_logits)
    return tf.keras.Model(inputs=video_input, outputs=prediction)
def simple_cnn_ccc_loss(self):
    model = Sequential()
    model.add(BatchNormalization(input_shape=self.input_shape))
    model.add(Conv2D(64, (5, 5), padding='same', activation='relu',
                     use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5), activation='relu', use_bias=False))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(256, (5, 5), activation='relu', use_bias=False))
    model.add(AvgPool2D(pool_size=(9, 9), strides=9))
    model.add(Flatten())
    model.add(Dense(300))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    opt = SGD(lr=.01, decay=1e-5, momentum=.9)
    model.compile(loss=metrics.ccc_loss,
                  optimizer=opt,
                  metrics=[
                      metrics.rmse, metrics.rmse_v, metrics.rmse_a,
                      metrics.cc_v, metrics.cc_a, metrics.ccc_v, metrics.ccc_a
                  ])
    return model
def create_model(num_classes):
    input_image = Input(shape=(448, 448, 3), name='input_image')
    base_model = ResNet50(include_top=False, input_tensor=input_image,
                          weights='imagenet', pooling=None)
    x = base_model.output
    mask = Conv2D(1, kernel_size=1, padding='valid', use_bias=True,
                  name='mask')(x)
    mask = AvgPool2D(pool_size=(2, 2), strides=2)(mask)
    mask = Activation(activation='tanh')(mask)
    mask = Reshape((49,), name='loc')(mask)
    x = AvgPool2D(pool_size=(14, 14))(x)
    x = Reshape((2048,))(x)
    out1 = Dense(num_classes, use_bias=False, name='cls')(x)
    out2 = Dense(2, use_bias=False, name='adv')(x)
    return Model(input_image, [out1, out2, mask], name='dcl')
def build(self, input_shape):
    n, h, w, c = input_shape
    self.dense_units = c // self.reduction_ratio
    self.avg_pool = AvgPool2D(pool_size=(h, w))
    self.fc = tf.keras.Sequential([
        Dense(units=self.dense_units, activation='relu', use_bias=False,
              kernel_regularizer=l2(self.decay)),
        Dense(units=c, activation='sigmoid', use_bias=False,
              kernel_regularizer=l2(self.decay))
    ])
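This build only creates the squeeze-and-excitation sublayers. A plausible call() to pair with it, following the standard SE recipe of rescaling the input by learned per-channel gates; a sketch, not the author's verified forward pass:

def call(self, inputs):
    # squeeze: global spatial average, kept as (batch, 1, 1, c) for broadcasting
    s = self.avg_pool(inputs)
    # excitation: bottleneck MLP producing per-channel gates in (0, 1)
    s = self.fc(s)
    # scale: reweight each channel of the input feature map
    return inputs * s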
def transition_layer(self, x, s_scope):
    with tf.name_scope(s_scope):
        x = self.Batch_Normalization(x, self.b_training,
                                     s_scope + '_batch_normal_0')
        x = Activation('relu', name=s_scope + '_relu1')(x)
        x = Conv2D(filters=4 * self.filters, kernel_size=1, padding='SAME',
                   name=s_scope + '_conv_1')(x)
        # Dropout takes the training flag at call time, not as a constructor arg
        x = Dropout(rate=dropout_rate)(x, training=self.b_training)
        x = AvgPool2D(pool_size=2, strides=2)(x)
        return x
def _down_block(tensor, num_filters, kernel_size=3, padding='same', strides=2,
                shortcut=True, activation='lrelu', dropout_rate=None,
                dropout_wrn=False, down_sampling='strided_conv',
                initializer='orthogonal', batchnorm=True, name=''):
    tensor = Conv2D(kernel_size=1, filters=num_filters)(tensor)
    tensor = _resnet_block(tensor, num_filters, kernel_size,
                           shortcut=shortcut, padding=padding,
                           activation=activation, initializer='orthogonal',
                           dropout_rate=dropout_rate, dropout_wrn=dropout_wrn,
                           batchnorm=batchnorm, name=name)
    skip_tensor = tensor
    # down-sampling
    if down_sampling == 'strided_conv':
        tensor = Conv2D(filters=num_filters, kernel_size=kernel_size * 2 - 1,
                        strides=strides, padding=padding,
                        kernel_initializer=initializer)(tensor)
    elif down_sampling == 'maxpool':
        tensor = Conv2D(filters=num_filters, kernel_size=kernel_size,
                        padding=padding,
                        kernel_initializer=initializer)(tensor)
        tensor = MaxPool2D(strides)(tensor)
    elif down_sampling == 'avgpool':
        tensor = Conv2D(filters=num_filters, kernel_size=kernel_size,
                        padding=padding,
                        kernel_initializer=initializer)(tensor)
        tensor = AvgPool2D(strides)(tensor)
    else:
        raise ValueError("down_sampling should be one of "
                         "['strided_conv', 'maxpool', 'avgpool']")
    return tensor, skip_tensor
def GoogleNet(inputs, class_num=5, aux_logits=False):
    # expects input of shape (224, 224, 3)
    x = layers.Conv2D(64, kernel_size=7, strides=2, padding="SAME",
                      activation="relu", name="conv2d_1")(inputs)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME",
                         name="maxpool_1")(x)
    x = layers.Conv2D(64, kernel_size=1, activation="relu", name="conv2d_2")(x)
    x = layers.Conv2D(192, kernel_size=3, padding="SAME", activation="relu",
                      name="conv2d_3")(x)
    x = layers.MaxPool2D(pool_size=3, strides=2, padding="SAME",
                         name="maxpool_2")(x)
    # Inception blocks
    x = Inception(64, 96, 128, 16, 32, 32, x, name="inception_3a")
    x = Inception(128, 128, 192, 32, 96, 64, x, name="inception_3b")
    x = MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_3")(x)
    # Inception block
    x = Inception(192, 96, 208, 16, 48, 64, x, name="inception_4a")
    # Auxiliary classifier 1: used during training, removed at test time.
    if aux_logits:
        aux1 = InceptionAux(class_num, x, name="aux_1")
    # Inception blocks
    x = Inception(160, 112, 224, 24, 64, 64, x, name="inception_4b")
    x = Inception(128, 128, 256, 24, 64, 64, x, name="inception_4c")
    x = Inception(112, 144, 288, 32, 64, 64, x, name="inception_4d")
    # Auxiliary classifier 2: used during training, removed at test time.
    if aux_logits:
        aux2 = InceptionAux(class_num, x, name="aux_2")
    # Inception blocks
    x = Inception(256, 160, 320, 32, 128, 128, x, name="inception_4e")
    x = MaxPool2D(pool_size=3, strides=2, padding="SAME", name="maxpool_4")(x)
    x = Inception(256, 160, 320, 32, 128, 128, x, name="inception_5a")
    x = Inception(384, 192, 384, 48, 128, 128, x, name="inception_5b")
    # average pooling layer
    x = AvgPool2D(pool_size=7, strides=1, name="avgpool_1")(x)
    # flatten
    x = Flatten(name="output_flatten")(x)
    x = Dropout(rate=0.4, name="output_dropout")(x)
    x = Dense(class_num, name="output_dense")(x)
    aux3 = Softmax(name="aux_3")(x)
    # include the auxiliary classifiers only when requested
    if aux_logits:
        model = Model(inputs=inputs, outputs=[aux1, aux2, aux3])
    else:
        model = Model(inputs=inputs, outputs=aux3)
    return model
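A brief usage sketch for GoogleNet above, assuming the Inception and InceptionAux helpers are defined elsewhere in the project:

inputs = layers.Input(shape=(224, 224, 3))
# aux_logits=True attaches the two auxiliary heads used only during training
model = GoogleNet(inputs, class_num=5, aux_logits=True)
model.summary()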
def cnn(input_shape=[28, 28, 1]):
    # Conv2D requires a channel axis, so the default shape includes one
    inputs = Input(input_shape)
    # x = tf.reshape(inputs, [-1] + input_shape + [3])
    x = Conv2D(8, (3, 3), activation='relu', use_bias=False)(inputs)
    x = AvgPool2D(strides=2)(x)
    x = Conv2D(16, (3, 3), activation='relu', use_bias=False)(x)
    x = GlobalAvgPool2D()(x)
    x = Dense(10, activation='softmax', use_bias=False)(x)
    return Model(inputs=inputs, outputs=[x])
def InceptionAux(num_classes, inputs, name):
    x = AvgPool2D(pool_size=5, strides=3, name=name + 'pool')(inputs)
    x = Conv2D(128, 1, activation='relu', name=name + 'conv')(x)
    x = Flatten(name=name + 'flat')(x)
    x = Dropout(rate=0.5, name=name + 'drop1')(x)
    x = Dense(1024, activation='relu', name=name + 'dense1')(x)
    x = Dropout(rate=0.5, name=name + 'drop2')(x)
    x = Dense(num_classes, name=name + 'dense2')(x)
    x = Softmax(name=name + 'softmax')(x)
    return x
def create_model(input_shape):
    model = Sequential()
    model.add(
        Conv2D(6, (5, 5), padding='same', activation='relu',
               input_shape=(28, 28, 1)))
    model.add(AvgPool2D(pool_size=(2, 2)))
    model.add(Conv2D(filters=16, kernel_size=(5, 5), activation='relu'))
    model.add(AvgPool2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(units=120, activation='relu'))
    model.add(Dense(units=84, activation='relu'))
    model.add(Dense(units=10, activation='softmax'))
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    return model
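This LeNet-style model pairs naturally with MNIST. A short training sketch, assuming integer class labels (to match the sparse loss) and pixels scaled to [0, 1]:

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# add the channel axis the model expects and normalize
x_train = x_train[..., None] / 255.0
x_test = x_test[..., None] / 255.0
model = create_model(input_shape=(28, 28, 1))
model.fit(x_train, y_train, epochs=1, batch_size=64,
          validation_data=(x_test, y_test))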
def build(width, height, number_of_classes, final_activation='softmax'):
    input_size = (height, width, 3)
    if backend.image_data_format() == 'channels_first':
        input_size = (3, height, width)
    input_tensor = Input(input_size)

    initial = Conv2D(64, (7, 7), strides=2, activation='relu')(input_tensor)
    max_pooling_initial = MaxPooling2D(pool_size=(2, 2))(initial)

    batch_1_batchNorm = BatchNormalization()(max_pooling_initial)
    batch_1_activ = Activation('relu')(batch_1_batchNorm)
    batch_1_conv2d_1 = Conv2D(128, (1, 1), activation='relu', padding='same')(batch_1_activ)
    batch_1_drop = Dropout(0.3)(batch_1_conv2d_1)
    batch_1_conv2d_2 = Conv2D(32, (3, 3), activation='relu', padding='same')(batch_1_drop)

    batch_2 = Concatenate()([max_pooling_initial, batch_1_conv2d_2])
    batch_2_batchNorm = BatchNormalization()(batch_2)
    batch_2_activ = Activation('relu')(batch_2_batchNorm)
    batch_2_conv2d_1 = Conv2D(128, (1, 1), activation='relu', padding='same')(batch_2_activ)
    batch_2_drop = Dropout(0.4)(batch_2_conv2d_1)
    batch_2_conv2d_2 = Conv2D(32, (3, 3), activation='relu', padding='same')(batch_2_drop)

    batch_3 = Concatenate()([batch_2, batch_2_conv2d_2])
    batch_3_batchNorm = BatchNormalization()(batch_3)
    batch_3_activ = Activation('relu')(batch_3_batchNorm)
    batch_3_conv2d_1 = Conv2D(128, (1, 1), activation='relu', padding='same')(batch_3_activ)
    batch_3_drop = Dropout(0.4)(batch_3_conv2d_1)
    batch_3_conv2d_2 = Conv2D(32, (3, 3), activation='relu', padding='same')(batch_3_drop)

    batch_4 = Concatenate()([batch_3, batch_3_conv2d_2])
    batch_4_batchNorm = BatchNormalization()(batch_4)
    batch_4_activ = Activation('relu')(batch_4_batchNorm)
    batch_4_conv2d_1 = Conv2D(128, (1, 1), activation='relu', padding='same')(batch_4_activ)
    batch_4_drop = Dropout(0.4)(batch_4_conv2d_1)
    batch_4_conv2d_2 = Conv2D(32, (3, 3), activation='relu', padding='same')(batch_4_drop)

    final_batch = Concatenate()([batch_4_conv2d_2, batch_4])
    downsampling_batchNorm = BatchNormalization()(final_batch)
    downsampling_activ = Activation('relu')(downsampling_batchNorm)
    downsampling_conv2d_1 = Conv2D(32, (1, 1), activation='relu')(downsampling_activ)
    downsampling_avg = AvgPool2D(pool_size=(2, 2), strides=2)(downsampling_conv2d_1)

    flatten = Flatten()(downsampling_avg)
    top_layer_dense_1 = Dense(1024, activation='relu')(flatten)
    top_layer_dropout = Dropout(0.4)(top_layer_dense_1)
    top_layer_dense_2 = Dense(number_of_classes,
                              activation=final_activation)(top_layer_dropout)

    model = Model(inputs=input_tensor, outputs=top_layer_dense_2)
    model.summary()
    return model
def pooling_layer(x, l):
    if isinstance(l["pool_size"], int):
        pool_h, pool_w = (l["pool_size"], l["pool_size"])
    else:
        pool_h, pool_w = l["pool_size"]
    # clamp the pool window so it never exceeds the feature map
    pool_size = (min(pool_h, x.shape[1]), min(pool_w, x.shape[2]))
    if l["type"] == "avg":
        return AvgPool2D(pool_size)(x)
    else:
        assert l["type"] == "max"
        return MaxPool2D(pool_size)(x)
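A quick usage sketch for pooling_layer, assuming l is a per-layer config dict of the shape the function expects:

inputs = Input(shape=(32, 32, 16))
# 'pool_size' may be an int or an (h, w) pair; 'type' selects avg vs. max
x = pooling_layer(inputs, {'type': 'avg', 'pool_size': 2})
x = pooling_layer(x, {'type': 'max', 'pool_size': (4, 4)})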
def _getDescriminator(self, encoded_dim):
    """ Build Discriminator Model Based on Paper Configuration

    Args:
        encoded_dim (int): number of latent variables
    Return:
        A sequential keras model
    """
    latent_dim = encoded_dim
    shape = self.shape
    # build the discriminator model
    latent_inputs = Input(shape=(latent_dim,), name='discriminator_input')
    x = Dense(shape[1] * shape[2] * shape[3],
              kernel_initializer=initializer,
              bias_initializer=initializer)(latent_inputs)
    x = Reshape((shape[1], shape[2], shape[3]))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(filters=32, kernel_size=3, activation='relu',
               padding='same')(x)
    x = AvgPool2D(pool_size=2, strides=2)(x)
    x = Conv2D(filters=16, kernel_size=3, activation='relu')(x)
    x = AvgPool2D(pool_size=2, strides=2)(x)
    x = Flatten()(x)
    outputs = Dense(1, name='discriminator_output', activation='sigmoid',
                    kernel_initializer=initializer,
                    bias_initializer=initializer)(x)
    # instantiate discriminator model
    discriminator = Model(latent_inputs, outputs, name='discriminator')
    discriminator.summary()
    return discriminator
def first_block(self):
    inp = Input(shape=(4, 4, 3))
    network = Conv2D(512, (3, 3), 1, padding='SAME',
                     kernel_initializer=RandomNormal(0, 1))(inp)
    network = LeakyReLU(0.2)(network)
    network = Conv2D(512, (3, 3), 1, padding='SAME',
                     kernel_initializer=RandomNormal(0, 1))(network)
    network = LeakyReLU(0.2)(network)
    network = AvgPool2D(pool_size=(4, 4))(network)
    network = Flatten()(network)
    network = Dense(512, kernel_initializer=RandomNormal(0, 1))(network)
    network = LeakyReLU(0.2)(network)
    network = Dense(1, kernel_initializer=RandomNormal(0, 1))(network)
    model = Model(inputs=inp, outputs=network)
    return model
def classifier_layers(x, input_shape, trainable=False):
    # compile times on theano tend to be very high, so we use smaller ROI
    # pooling regions to work around it (hence a smaller stride in the region
    # that follows the ROI pool)
    if K.backend() == 'tensorflow':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a',
                          input_shape=input_shape, strides=(2, 2),
                          trainable=trainable)
    elif K.backend() == 'theano':
        x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a',
                          input_shape=input_shape, strides=(1, 1),
                          trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b',
                          trainable=trainable)
    x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c',
                          trainable=trainable)
    x = TimeDistributed(AvgPool2D((7, 7)), name='avg_pool')(x)
    return x
def add_block(self):
    def no_of_filters(stage):
        return min(int(8192 / (2 ** (stage + 2))), 512)

    inp = Input(shape=(None, None, 3))
    network = Conv2D(no_of_filters(self.block_no), (3, 3), 1, padding='SAME',
                     kernel_initializer=RandomNormal(0, 1))(inp)
    network = LeakyReLU(0.2)(network)
    network = Conv2D(no_of_filters(self.block_no - 1), (3, 3), 1,
                     padding='SAME',
                     kernel_initializer=RandomNormal(0, 1))(network)
    network = LeakyReLU(0.2)(network)
    network = AvgPool2D()(network)
    # reuse the existing discriminator layers after the new input block
    for layer in self.discriminator.layers[3:]:
        network = layer(network)
    self.block_no += 1
    model = Model(inputs=inp, outputs=network)
    self.discriminator = model
def __init__(self, filters, strides=(1, 1), **kwargs):
    self.strides = strides
    if strides != (1, 1):
        self.shortcut = Conv2D(filters, (1, 1), padding='same',
                               use_bias=False)
    self.conv_0 = Conv2D(filters, (3, 3), strides=strides, padding='same',
                         use_bias=False)
    self.conv_1 = Conv2D(filters, (3, 3), padding='same', use_bias=False)
    self.bn_0 = BatchNormalization(momentum=0.9, epsilon=1e-5)
    self.bn_1 = BatchNormalization(momentum=0.9, epsilon=1e-5)
    self.activation0 = Activation('relu')
    self.activation1 = Activation('relu')
    self.avgpool = AvgPool2D((2, 2), strides=(2, 2), padding='same')
    self.se = SELayer(filters)
    super(SEBasicBlock, self).__init__(**kwargs)
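One plausible call() for SEBasicBlock, assuming the common SE-ResNet ordering (conv-bn-relu, conv-bn, SE rescaling, residual add) and that the avgpool plus 1x1 conv forms the strided shortcut; a sketch under those assumptions, not the author's verified forward pass:

def call(self, inputs, training=False):
    x = self.activation0(self.bn_0(self.conv_0(inputs), training=training))
    x = self.bn_1(self.conv_1(x), training=training)
    # rescale channels with squeeze-and-excitation
    x = self.se(x)
    # downsample and project the shortcut when the block is strided
    shortcut = inputs
    if self.strides != (1, 1):
        shortcut = self.shortcut(self.avgpool(inputs))
    return self.activation1(x + shortcut)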