def __init__(self, channels, init_block_channels, bottleneck, in_channels=3,
             in_size=(224, 224), classes=1000, data_format="channels_last",
             **kwargs):
    """CBAM-ResNet: init block, staged CBAM residual units, pooling, classifier.

    Parameters
    ----------
    channels : list of list of int
        Output channels for each unit, grouped per stage.
    init_block_channels : int
        Output channels of the initial block.
    bottleneck : bool
        Whether units use the bottleneck variant.
    in_channels : int
        Number of input image channels.
    in_size : tuple of int
        Expected spatial input size.
    classes : int
        Number of classification classes.
    data_format : str
        "channels_last" or "channels_first".
    """
    super(CbamResNet, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format

    self.features = tf.keras.Sequential(name="features")
    self.features.add(
        ResInitBlock(in_channels=in_channels,
                     out_channels=init_block_channels,
                     data_format=data_format,
                     name="init_block"))
    in_channels = init_block_channels
    for i, channels_per_stage in enumerate(channels):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j, out_channels in enumerate(channels_per_stage):
            # Downsample at the first unit of every stage except the first.
            strides = 2 if (j == 0) and (i != 0) else 1
            stage.add(
                CbamResUnit(in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bottleneck=bottleneck,
                            data_format=data_format,
                            name="unit{}".format(j + 1)))
            in_channels = out_channels
        self.features.add(stage)
    # 7x7 average pooling — assumes 224x224 input -> 7x7 final map (per in_size default).
    self.features.add(
        nn.AveragePooling2D(pool_size=7,
                            strides=1,
                            data_format=data_format,
                            name="final_pool"))
    self.output1 = nn.Dense(units=classes,
                            input_dim=in_channels,
                            name="output1")
def trans_block(x, reduction):
    """DenseNet-C transition block: compress feature maps, then downsample.

    x         : input layer
    reduction : fraction of feature maps to keep (compression factor)
    """
    # Number of surviving feature maps; int() casts the Dimension object.
    compressed = int(int(x.shape[3]) * reduction)

    # BN-linear-conv pre-activation: BN first, then a 1x1 linear projection.
    x = layers.BatchNormalization()(x)
    x = layers.Conv2D(compressed, (1, 1), strides=(1, 1), use_bias=False)(x)

    # Average (not max) pooling halves each spatial dimension (75% area cut).
    return layers.AveragePooling2D((2, 2), strides=(2, 2))(x)
def InceptionV3_top30(inputs, classes=1000, pooling='avg'):
    """Top (mixed-10) block of InceptionV3 plus the classification head."""
    global backend
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    x = inputs

    # mixed 10: 8 x 8 x 2048
    branch1x1 = conv2d_bn(x, 320, 1, 1)

    branch3x3 = conv2d_bn(x, 384, 1, 1)
    branch3x3 = layers.concatenate(
        [conv2d_bn(branch3x3, 384, 1, 3),
         conv2d_bn(branch3x3, 384, 3, 1)],
        axis=channel_axis,
        name='mixed9_' + str(1))

    branch3x3dbl = conv2d_bn(x, 448, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
    branch3x3dbl = layers.concatenate(
        [conv2d_bn(branch3x3dbl, 384, 1, 3),
         conv2d_bn(branch3x3dbl, 384, 3, 1)],
        axis=channel_axis)

    branch_pool = layers.AveragePooling2D((3, 3), strides=(1, 1),
                                          padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)

    x = layers.concatenate([branch1x1, branch3x3, branch3x3dbl, branch_pool],
                           axis=channel_axis,
                           name='mixed' + str(10))

    # Classification head.
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = layers.Dense(classes, activation='softmax', name='predictions',
                     kernel_regularizer=regularization)(x)
    return keras.Model(inputs, x, name='inception_v3_top30')
def Conv2DBlockRegu(_input,
                    n_kerns=4,
                    kern_space=1,
                    kern_length=64,
                    kern_regu_scale=0.0,
                    l1=0.000,
                    l2=0.000,
                    n_pool=4,
                    dropout_rate=0.25,
                    dropout_type='Dropout',
                    activation='elu',
                    return_model=None):
    """Temporal-filter-like Conv2D block: conv -> BN -> activation -> pool -> dropout.

    Returns the output tensor when ``return_model`` is False, otherwise a
    ``models.Model`` wrapping the block.

    Raises:
        ValueError: if ``dropout_type`` is not 'SpatialDropout2D' or 'Dropout'.
    """
    if dropout_type == 'SpatialDropout2D':
        dropout_type = layers.SpatialDropout2D
    elif dropout_type == 'Dropout':
        dropout_type = layers.Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    # Kernel regularizer: the length-window regularizer takes precedence
    # over plain L1/L2 when its scale is non-zero.
    if kern_regu_scale:
        kern_regu = KernelLengthRegularizer((1, kern_length),
                                            window_func='poly',
                                            window_scale=kern_regu_scale,
                                            poly_exp=2,
                                            threshold=0.0015)
    elif l1 > 0 or l2 > 0:
        kern_regu = tf.keras.regularizers.l1_l2(l1=l1, l2=l2)
    else:
        kern_regu = None

    # Temporal-filter-like convolution.
    _y = layers.Conv2D(n_kerns, (kern_space, kern_length),
                       padding='same',
                       kernel_regularizer=kern_regu,
                       use_bias=False)(_input)
    _y = layers.BatchNormalization()(_y)
    _y = layers.Activation(activation)(_y)
    if n_pool > 1:
        _y = layers.AveragePooling2D((1, n_pool))(_y)
    _y = dropout_type(dropout_rate)(_y)

    if return_model is False:
        return _y
    # BUG FIX: the original passed the builtin ``input`` (not ``_input``)
    # as the model's input tensor, which raises at Model construction.
    return models.Model(inputs=_input, outputs=_y)
def build_model(hp):
    """Function that build a TF model based on hyperparameters values.

    Args:
        hp (HyperParameter): hyperparameters values

    Returns:
        Model: Compiled model
    """
    num_layers = hp.Int('num_layers', 2, 8, default=6)
    lr = hp.Choice('learning_rate', [1e-3, 5e-4])

    inputs = layers.Input(shape=(28, 28, 1))
    x = inputs
    for layer_idx in range(num_layers):
        suffix = str(layer_idx)
        n_filters = hp.Int('filters_' + suffix, 32, 256, step=32, default=64)
        x = layers.Conv2D(filters=n_filters,
                          kernel_size=3,
                          padding='same',
                          activation='relu')(x)
        # Only pool while the feature map is still large enough.
        if x.shape[1] >= 8:
            pool_choice = hp.Choice('pool_' + suffix, values=['max', 'avg'])
            if pool_choice == 'max':
                x = layers.MaxPooling2D(2)(x)
            elif pool_choice == 'avg':
                x = layers.AveragePooling2D(2)(x)

    x = layers.Flatten()(x)
    outputs = layers.Dense(10, activation='softmax')(x)

    # Build and compile the model.
    model = keras.Model(inputs, outputs)
    model.compile(optimizer=Adam(lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def inception_v4_c(input_tensor):
    """Block c of inception v4.

    Args:
        input_tensor (keras tensor): input tensor

    Returns:
        keras tensor
    """
    def conv_bn_relu(t, filters, kernel_size):
        # Conv -> BN -> ReLU; stride 1 and 'same' padding throughout the block.
        t = layers.Conv2D(filters, kernel_size, 1, padding='same')(t)
        t = layers.BatchNormalization()(t)
        return layers.ReLU()(t)

    # Pooling branch: average pool then 1x1 projection.
    avgpool = layers.AveragePooling2D(padding='same', strides=1)(input_tensor)
    conv_pool = conv_bn_relu(avgpool, 256, 1)

    # Plain 1x1 branch.
    conv1 = conv_bn_relu(input_tensor, 256, 1)

    # 1x1 stem that splits into parallel 1x3 and 3x1 heads.
    conv2 = conv_bn_relu(input_tensor, 384, 1)
    conv21 = conv_bn_relu(conv2, 256, (1, 3))
    conv22 = conv_bn_relu(conv2, 256, (3, 1))

    # 1x1 -> 1x3 -> 3x1 stem that splits into parallel 3x1 and 1x3 heads.
    conv3 = conv_bn_relu(input_tensor, 384, 1)
    conv3 = conv_bn_relu(conv3, 448, (1, 3))
    conv3 = conv_bn_relu(conv3, 512, (3, 1))
    conv31 = conv_bn_relu(conv3, 256, (3, 1))
    conv32 = conv_bn_relu(conv3, 256, (1, 3))

    return layers.Concatenate()(
        [conv_pool, conv1, conv21, conv22, conv31, conv32])
def get_model():
    """LeNet-5-style model; expects 32 x 32 inputs (3 channels here).

    Architecture adapted from
    https://engmrk.com/lenet-5-a-classic-cnn-architecture/
    """
    import tensorflow as tf
    from tensorflow.keras import layers

    model = tf.keras.Sequential()
    model.add(tf.keras.Input(shape=(32, 32, 3)))
    model.add(layers.Conv2D(filters=6, kernel_size=(5, 5), strides=(1, 1),
                            activation="relu"))
    model.add(layers.AveragePooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(filters=16, kernel_size=(5, 5), strides=(1, 1),
                            activation="relu"))
    # A strided 2x2 convolution stands in for the second pooling stage.
    model.add(layers.Conv2D(filters=6, kernel_size=(2, 2), strides=(2, 2),
                            activation="relu"))
    model.add(layers.Flatten())
    # Classic LeNet-5 dense head: 120 -> 84 -> 10.
    for units in (120, 84, 10):
        model.add(layers.Dense(units=units))
    return model
def _mixed(x, filters, name=None):
    """Utility function to implement the mixed (inception mobilenet) block.

    # Arguments
        x: input tensor.
        filters: a list of filter sizes.
        name: name of the ops

    # Returns
        Output tensor after applying the mixed block.
    """
    if len(filters) != 4:
        raise ValueError('filters should have 4 components')

    def sub_name(suffix):
        # Derive an op name only when a base name was supplied.
        return name + suffix if name else None

    branch1x1 = _conv2d_bn(x, filters[0], kernel_size=(1, 1),
                           name=sub_name('_1x1'))
    branch3x3 = _depthwise_conv2d_bn(x, filters[1], kernel_size=(3, 3),
                                     name=sub_name('_3x3'))
    branch5x5 = _depthwise_conv2d_bn(x, filters[2], kernel_size=(5, 5),
                                     name=sub_name('_5x5'))

    branchpool = layers.AveragePooling2D(pool_size=(3, 3), strides=(1, 1),
                                         padding='same',
                                         name=sub_name('_pool_1'))(x)
    branchpool = _conv2d_bn(branchpool, filters[3], (1, 1),
                            name=sub_name('_pool_2'))

    concat_axis = 1 if backend.image_data_format() == 'channels_first' else 3
    return layers.concatenate([branch1x1, branch3x3, branch5x5, branchpool],
                              axis=concat_axis, name=name)
def HIDRA(temporal_encoders='HIDRA', probabilistic=True, num_predictions=72,
          name='HIDRA'):
    """Build the HIDRA model.

    Args:
        temporal_encoders (str, optional): Which temporal encoders to use
            (One of: 'HIDRA', 'LSTM', 'TCN'). Defaults to 'HIDRA'.
        probabilistic (bool, optional): Model outputs as probability
            distributions. Defaults to True.
        num_predictions (int, optional): Number of predicted times.

    Raises:
        ValueError: if ``temporal_encoders`` is not a supported name.
    """
    # Time invariant atmospheric spatial encoder
    weather_cnn = tf.keras.Sequential([
        SpatialEncoding(),
        ResNet_v2(num_res_blocks=2, reduce_fn=L.AveragePooling2D((2, 2))),
    ])

    # Add spatial attention and ReLU
    weather_cnn_full = tf.keras.Sequential([
        TimeInvariant(weather_cnn),
        FlattenSpatial(),
        TemporalLinearCombination(combination_axis=2, temporal_axis=1),
        L.ReLU()
    ])

    # Regression network
    regression = RegressionNetwork(
        num_predictions=num_predictions,
        units=[256, 256, 256],
        dropout_rate=0.5,
        probabilistic=probabilistic)

    # Temporal encoders
    if temporal_encoders == 'HIDRA':
        weather_pr = LinearCombination(axis=1)
        ssh_pr = L.Flatten()
    elif temporal_encoders == 'LSTM':
        weather_pr = LSTMStack([128, 128, 128])
        ssh_pr = LSTMStack([32, 32, 32])
    elif temporal_encoders == 'TCN':
        weather_pr = TCN([128, 128, 128])
        ssh_pr = TCN([32, 32, 32])
    else:
        # BUG FIX: an unsupported value previously fell through and crashed
        # below with a confusing NameError; fail fast with a clear message.
        raise ValueError(
            "temporal_encoders must be one of 'HIDRA', 'LSTM', 'TCN'; "
            "got {!r}".format(temporal_encoders))

    model = HIDRABase(weather_cnn_full, weather_pr, ssh_pr, regression,
                      name=name)
    return model
def __init__(self, in_channels, out_channels, pool_out_size,
             upscale_out_size, data_format="channels_last", **kwargs):
    """Pyramid pooling branch: average-pool to a coarser grid, then 1x1 conv.

    Parameters
    ----------
    in_channels : int
        Number of input channels to the 1x1 projection.
    out_channels : int
        Number of output channels of the 1x1 projection.
    pool_out_size : int or tuple
        Pool window size used to reduce the spatial resolution.
    upscale_out_size : tuple or None
        Target spatial size stored for later upscaling (presumably used in
        ``call`` — not visible here, confirm against the class body).
    data_format : str
        "channels_last" or "channels_first".
    """
    super(PyramidPoolingBranch, self).__init__(**kwargs)
    self.upscale_out_size = upscale_out_size
    self.data_format = data_format

    self.pool = nn.AveragePooling2D(
        pool_size=pool_out_size,
        data_format=data_format,
        name="pool")
    self.conv = conv1x1_block(
        in_channels=in_channels,
        out_channels=out_channels,
        data_format=data_format,
        name="conv")
def trans_block(x, reduce_by):
    """DenseNet-C transition block (compression + downsampling).

    x         : input layer
    reduce_by : percentage of reduction of feature maps
    """
    # Compressed feature-map count; int() casts the Dimension object.
    n_out = int(int(x.shape[3]) * reduce_by)

    # Bottleneck 1x1 convolution followed by BN and ReLU.
    x = layers.Conv2D(n_out, (1, 1), strides=(1, 1))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Average (not max) pooling halves each spatial dimension (75% area cut).
    return layers.AveragePooling2D((2, 2), strides=(2, 2))(x)
def define_model_bins(nchan, L, Fs):
    """Compiled binary classifier over (L, nchan) inputs using MorletConv."""
    model = tf.keras.Sequential([
        layers.InputLayer((L, nchan), batch_size=1),
        MorletConv([L, nchan], Fs, input_shape=[L, nchan, 1],
                   etas=25, wtime=0.04),
        layers.Conv2D(filters=25, kernel_size=[1, nchan], activation='elu'),
        layers.Permute((3, 1, 2)),
        layers.AveragePooling2D(pool_size=(1, 10), strides=(1, 5)),
        # layers.Dropout(0.75),  # disabled in the original experiment
        layers.Flatten(),
        layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(loss=losses.BinaryCrossentropy(),
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])
    return model
def build_model():
    """Compiled regression CNN over (353, 22, 1) inputs (MSE loss)."""
    model = models.Sequential()
    model.add(layers.Conv2D(8, (25, 22), activation='relu', name='Conv2D_1',
                            input_shape=(353, 22, 1)))
    model.add(layers.Dropout(0.25))
    model.add(layers.AveragePooling2D((10, 1), name='AveragePooling2D_2'))
    # layers.Conv2D(8, (5, 1), activation='relu', name='Conv2D_3'),
    model.add(layers.Dropout(0.25))
    # layers.MaxPooling2D((10, 1), name='MaxPooling2D_4'),
    # layers.Conv2D(64, (3, 1), activation='relu', name='Conv2D_5'),
    model.add(layers.Flatten())
    model.add(layers.Dense(4, activation='relu', name='Dense_6'))
    model.add(layers.Dropout(0.25))
    model.add(layers.Dense(1, activation='relu', name='Output_8'))

    model.compile(loss='mse',
                  optimizer=tf.keras.optimizers.Adam(),
                  metrics=['mae', 'mse'])
    return model
def gen_avgpool2d_test(name, input_shape, kernels, strides, padding):
    """Generate and persist an AveragePooling2D test case (model + tensors)."""
    # Build a single-layer model.
    inp = layers.Input(name='input', batch_size=input_shape[0],
                       shape=input_shape[1:])
    out = layers.AveragePooling2D(pool_size=kernels,
                                  strides=strides,
                                  padding=padding)(inp)
    model = Model(inputs=[inp], outputs=[out])

    # Deterministic random input and the model's reference output.
    np.random.seed(0)
    inp_tensor = np.random.rand(*input_shape).astype(np.float32)
    out_tensor = model.predict(inp_tensor)

    # Persist the model and both tensors.
    save_model(model, name)
    save_tensor(inp_tensor, name + '.inp0')
    save_tensor(out_tensor, name + '.out0')

    # Clear session so generated graphs don't accumulate across calls.
    keras_backend.clear_session()
def transition_block(x, reduction, name):
    """A transition block (BN -> ReLU -> 1x1 conv -> 2x2 average pool).

    # Arguments
        x: input tensor.
        reduction: float, compression rate at transition layers.
        name: string, block label.

    # Returns
        output tensor for the block.
    """
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    # Channel count is unchanged by BN/ReLU, so compute the compressed
    # filter count up front.
    n_filters = int(K.int_shape(x)[bn_axis] * reduction)

    x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    x = layers.Activation('relu', name=name + '_relu')(x)
    x = layers.Conv2D(n_filters, 1, use_bias=False, name=name + '_conv')(x)
    return layers.AveragePooling2D(2, strides=2, name=name + '_pool')(x)
def __init__(self, pool_shape, pool_type='max', use_tf_crop_and_resize=True,
             **kwargs):
    '''
    Implements ROI Pooling on multiple levels of the feature pyramid.

    Attributes
    ---
    pool_shape: (height, width) of the output pooled regions. Example: (7, 7)
    pool_type: 'max' or 'avg' — pooling applied after extracting at twice
        the target resolution.
    use_tf_crop_and_resize: whether to use TF's crop-and-resize for region
        extraction.

    Raises
    ---
    ValueError: if pool_type is neither 'max' nor 'avg'.
    '''
    super(PyramidROIAlign, self).__init__(**kwargs)
    # Extract at twice the target resolution, then reduce with a 2x pool.
    self.pre_pool_shape = tuple([2 * x for x in pool_shape])
    if pool_type == 'max':
        self._pool = layers.MaxPool2D(padding='same')
    elif pool_type == 'avg':
        self._pool = layers.AveragePooling2D(padding='same')
    else:
        # BUG FIX: an unknown pool_type previously left self._pool unset,
        # deferring the failure to an AttributeError at call time.
        raise ValueError(
            "pool_type must be 'max' or 'avg', got %r" % (pool_type,))
    self.use_tf_crop_and_resize = use_tf_crop_and_resize
def __init__(self):
    """MobileNetV2 backbone + 1x1-conv classifier head.

    7x7 average pooling at the end assumes 224x224 inputs.
    """
    super(MobileNetV2, self).__init__()
    self.conv1 = layers.Conv2D(32, 3, 2, "same")
    # BUG FIX: the first bottleneck must output 16 channels (MobileNetV2,
    # Table 2); the original's out_channel_num=64 contradicted
    # bottleneck_2's declared in_channel_num=16 below.
    self.bottleneck_1 = build_bottleneck(t=1,
                                         in_channel_num=32,
                                         out_channel_num=16,
                                         n=1,
                                         s=1)
    self.bottleneck_2 = build_bottleneck(6, 16, 24, 2, 2)
    self.bottleneck_3 = build_bottleneck(t=6, in_channel_num=24,
                                         out_channel_num=32, n=3, s=2)
    self.bottleneck_4 = build_bottleneck(t=6, in_channel_num=32,
                                         out_channel_num=64, n=4, s=2)
    self.bottleneck_5 = build_bottleneck(t=6, in_channel_num=64,
                                         out_channel_num=96, n=3, s=1)
    self.bottleneck_6 = build_bottleneck(t=6, in_channel_num=96,
                                         out_channel_num=160, n=3, s=2)
    self.bottleneck_7 = build_bottleneck(t=6, in_channel_num=160,
                                         out_channel_num=320, n=1, s=1)
    self.conv2 = layers.Conv2D(1280, 1, 1, "same")
    self.avgpool = layers.AveragePooling2D((7, 7))
    # 1x1 conv with softmax acts as the fully-connected classifier.
    self.conv3 = layers.Conv2D(NUM_CLASSES, 1, 1, "same",
                               activation=tf.keras.activations.softmax)
def __init__(self,
             dropout_rate=0.0,
             in_channels=3,
             in_size=(299, 299),
             classes=1000,
             data_format="channels_last",
             **kwargs):
    """InceptionV4 model: init block, three Inception stages, classifier head.

    Parameters
    ----------
    dropout_rate : float
        Dropout rate applied before the final dense layer (skipped when 0).
    in_channels : int
        Number of input image channels.
    in_size : tuple of int
        Expected spatial input size.
    classes : int
        Number of classification classes.
    data_format : str
        "channels_last" or "channels_first".
    """
    super(InceptionV4, self).__init__(**kwargs)
    self.in_size = in_size
    self.classes = classes
    self.data_format = data_format
    # Units per stage; note this local shadows any module-level `layers`
    # name for the rest of this constructor.
    layers = [4, 8, 4]
    normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
    reduction_units = [ReductionAUnit, ReductionBUnit]

    self.features = tf.keras.Sequential(name="features")
    self.features.add(
        InceptInitBlock(in_channels=in_channels,
                        data_format=data_format,
                        name="init_block"))
    for i, layers_per_stage in enumerate(layers):
        stage = tf.keras.Sequential(name="stage{}".format(i + 1))
        for j in range(layers_per_stage):
            # The first unit of stages 2 and 3 is a reduction unit.
            if (j == 0) and (i != 0):
                unit = reduction_units[i - 1]
            else:
                unit = normal_units[i]
            stage.add(
                unit(data_format=data_format,
                     name="unit{}".format(j + 1)))
        self.features.add(stage)
    # 8x8 average pooling — assumes 299x299 input -> 8x8 final feature map.
    self.features.add(
        nn.AveragePooling2D(pool_size=8,
                            strides=1,
                            data_format=data_format,
                            name="final_pool"))

    self.output1 = tf.keras.Sequential(name="output1")
    if dropout_rate > 0.0:
        self.output1.add(nn.Dropout(rate=dropout_rate, name="dropout"))
    # 1536 = channel count produced by the final Inception-C stage.
    self.output1.add(nn.Dense(units=classes, input_dim=1536, name="fc"))
def __init__(self, classes, width_multiplier=1):
    """MobileNetV2 with a configurable width multiplier.

    Parameters
    ----------
    classes : int
        Number of output classes.
    width_multiplier : float
        Channel width scaling factor (alpha).
    """
    super(MobileNetV2, self).__init__()
    a = width_multiplier
    self.classes = classes
    self.m_layers = LayerList()

    # Initial convolution.
    l = basic_conv_block(int(a * 32), (3, 3), stride=2, dropout=0.25,
                         activation="ReLU6", name="layer_0")
    self.m_layers.add(l)

    # Intermediate bottleneck blocks:
    # crearBloques(in_channels, expansion t, out_channels, n, stride).
    self.crearBloques(32, 1, a * 16, 1, 1)
    self.crearBloques(16, 6, a * 24, 2, 2)
    self.crearBloques(24, 6, a * 32, 3, 2)
    self.crearBloques(32, 6, a * 64, 4, 2)
    # BUG FIX: the previous stage outputs 64 channels; the original passed 69
    # (a typo), breaking the in/out channel chain 16-24-32-64-96-160-320.
    self.crearBloques(64, 6, a * 96, 3, 1)
    self.crearBloques(96, 6, a * 160, 3, 2)
    self.crearBloques(160, 6, a * 320, 1, 1)

    # Final pointwise convolution.
    l = pwise_conv_block(int(a * 1280), dropout=0.25, activation="ReLU6",
                         name="layer_{}_conv1x1".format(len(self.m_layers)))
    self.m_layers.add(l)

    # Average pooling and fully-connected classifier head.
    self.m_layers.add(layers.AveragePooling2D(pool_size=(7, 7),
                                              strides=(1, 1)),
                      training_arg=False)
    self.m_layers.add(layers.Flatten(), training_arg=False)
    self.m_layers.add(layers.Dense(1280))
    self.m_layers.add(layers.Dropout(0.5, name="dropout"), only_training=True)
    self.m_layers.add(layers.Dense(classes))
    self.m_layers.add(layers.Activation("softmax"))
def __init__(self, filters, kernel_size, strides, pool_size=(2, 2),
             activation='relu', *args, **kwargs):
    """Three same-padded convolutions plus batch-norm and average pooling."""
    super(ConvLayer, self).__init__(*args, **kwargs)
    self.batch_norm = layers.BatchNormalization()

    # Shared settings for all three convolutions.
    conv_kwargs = dict(filters=filters,
                       kernel_size=kernel_size,
                       strides=strides,
                       padding='same')
    self.conv_1 = layers.Conv2D(activation=activation, **conv_kwargs)
    self.conv_2 = layers.Conv2D(activation=activation, **conv_kwargs)
    # The third convolution always uses ReLU, regardless of `activation`.
    self.conv_3 = layers.Conv2D(activation='relu', **conv_kwargs)

    self.avg_pool = layers.AveragePooling2D(pool_size=pool_size,
                                            padding='same')
def inception_v4_b(input_tensor):
    """b block for inception v4.

    Args:
        input_tensor (keras tensor): input tensor

    Returns:
        keras tensor
    """
    def conv_bn_relu(t, filters, kernel_size):
        # Conv -> BN -> ReLU; stride 1 and 'same' padding throughout the block.
        t = layers.Conv2D(filters, kernel_size, 1, padding='same')(t)
        t = layers.BatchNormalization()(t)
        return layers.ReLU()(t)

    # Pooling branch: average pool then 1x1 projection.
    avgpool = layers.AveragePooling2D(strides=1, padding='same')(input_tensor)
    conv_pool = conv_bn_relu(avgpool, 128, 1)

    # Plain 1x1 branch.
    conv1 = conv_bn_relu(input_tensor, 384, 1)

    # 1x1 -> 1x7 -> 1x7 branch.
    conv2 = conv_bn_relu(input_tensor, 192, 1)
    conv2 = conv_bn_relu(conv2, 224, (1, 7))
    conv2 = conv_bn_relu(conv2, 256, (1, 7))

    # 1x1 -> 1x7 -> 7x1 -> 1x7 -> 7x1 branch.
    conv3 = conv_bn_relu(input_tensor, 192, 1)
    for n_filters, kernel in ((192, (1, 7)), (224, (7, 1)),
                              (224, (1, 7)), (256, (7, 1))):
        conv3 = conv_bn_relu(conv3, n_filters, kernel)

    return layers.Concatenate()([conv_pool, conv1, conv2, conv3])
def pyramidnet_cifar(inputs_shape, depth, alpha, num_classes,
                     bottleneck=False):
    """PyramidNet for CIFAR-sized inputs; returns an uncompiled Model."""
    # Blocks per group: depth = 9n+2 for bottleneck, 6n+2 otherwise.
    if bottleneck:
        n = int((depth - 2) / 9)
        block = bottle_neck
    else:
        n = int((depth - 2) / 6)
        block = basic_block
    # Per-block channel increment, spread evenly over all 3*n blocks.
    addrate = alpha / 3 / n

    inputs = layers.Input(shape=inputs_shape)
    x = layers.Conv2D(filters=16, kernel_size=3, padding="same",
                      use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)

    # First group keeps resolution; the next two downsample with stride 2.
    x, featuremap_dim = make_group(x, 16, addrate=addrate, block=block,
                                   block_depth=n)
    for _ in range(2):
        x, featuremap_dim = make_group(x, featuremap_dim, addrate=addrate,
                                       block=block, block_depth=n, stride=2)

    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.AveragePooling2D(8)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(num_classes)(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
def resnet(input_shape, depth):
    """ResNet v1 binary classifier for `input_shape` images.

    `depth` must be 6n+2 (e.g. 20, 32, 44).
    """
    num_classes = 2
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
    num_filters = 16
    num_res_blocks = int((depth - 2) / 6)

    inputs = layers.Input(shape=input_shape)
    x = resnet_layer(inputs=inputs)
    for stack in range(3):
        for res_block in range(num_res_blocks):
            # The first block of stacks 1+ downsamples by 2.
            downsampling = stack > 0 and res_block == 0
            strides = 2 if downsampling else 1
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None)
            if downsampling:
                # Project the shortcut with a 1x1 conv to match shapes.
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = layers.add([x, y])
            x = layers.Activation('relu')(x)
        num_filters *= 2

    x = layers.AveragePooling2D(pool_size=8)(x)
    y = layers.Flatten()(x)
    outputs = layers.Dense(num_classes,
                           activation='softmax',
                           kernel_initializer='he_normal')(y)
    return models.Model(inputs=inputs, outputs=outputs)
def inception_c(self, x):
    """Inception-C block: four parallel branches concatenated on channels."""
    # Branch 1: 3x3 average pooling, stride 1.
    branch1 = layers.AveragePooling2D(pool_size=(3, 3), strides=1,
                                      padding='same')(x)

    # Branch 2: plain 1x1 convolution.
    branch2 = self.conv2d_bn(x, 256, (1, 1))

    # Branch 3: 1x1 stem, then a parallel 1x3 / 3x1 split.
    branch3 = self.conv2d_bn(x, 384, (1, 1))
    branch3 = layers.concatenate(
        [self.conv2d_bn(branch3, 256, (1, 3)),
         self.conv2d_bn(branch3, 256, (3, 1))],
        axis=3)

    # Branch 4: 1x1 -> 1x3 -> 3x1 stem, then a parallel 3x1 / 1x3 split.
    branch4 = self.conv2d_bn(x, 384, 1)
    branch4 = self.conv2d_bn(branch4, 448, (1, 3))
    branch4 = self.conv2d_bn(branch4, 512, (3, 1))
    branch4 = layers.concatenate(
        [self.conv2d_bn(branch4, 256, (3, 1)),
         self.conv2d_bn(branch4, 256, (1, 3))],
        axis=3)

    return layers.concatenate([branch1, branch2, branch3, branch4], axis=3)
def conv2d_stats_block(input_layer, cropping, stats_block):
    """Cropped ResNet-style feature tower ending in 3x3 average pooling."""
    # Stem: crop, strided 5x5 conv, BN, 2x2 max pool.
    x = layers.Cropping2D(cropping, name='crop_' + stats_block)(input_layer)
    x = layers.Conv2D(8, (5, 5), (2, 2), name='conv2d_' + stats_block)(x)
    x = layers.BatchNormalization(name='batchnorm_' + stats_block)(x)
    x = layers.MaxPool2D((2, 2))(x)

    # Stage 2: one convolutional block, then two identity blocks.
    x = convolutional_block(x, 3, [8, 8, 16], 2, stats_block + '_a', s=1)
    for suffix in ('_b', '_c'):
        x = identity_block(x, 3, [8, 8, 16], 2, stats_block + suffix)

    # Stage 3: downsample, then three identity blocks.
    x = convolutional_block(x, 3, [16, 16, 32], 3, stats_block + '_a', s=2)
    for suffix in ('_b', '_c', '_d'):
        x = identity_block(x, 3, [16, 16, 32], 3, stats_block + suffix)

    # Stage 4: downsample, then three identity blocks.
    x = convolutional_block(x, 3, [32, 32, 64], 4, stats_block + '_a', s=2)
    for suffix in ('_b', '_c', '_d'):
        x = identity_block(x, 3, [32, 32, 64], 4, stats_block + suffix)

    return layers.AveragePooling2D((3, 3))(x)
def build_D(fade_in_alpha,
            mbstd_group_size=4,
            initial_resolution=2,
            target_resolution=10,
            num_channels=3):
    """Build one progressive-GAN discriminator model per resolution.

    Returns a list of models, one for each resolution from
    2**initial_resolution up to 2**target_resolution. Each model stacks the
    discriminator blocks of all lower resolutions, and (above the initial
    resolution) fades the new block in against a downsampled input path.
    """
    model_list = list()
    disc_block_list = list()
    for res in range(initial_resolution, target_resolution + 1):
        x0 = layers.Input(shape=(2**res, 2**res, num_channels))
        curr_from_rgb = fromrgb(res, num_channels)
        curr_D_block = block_D(res, initial_resolution, mbstd_group_size)
        x = curr_from_rgb(x0)
        x = curr_D_block(x)
        if res > initial_resolution:
            # Fade-in path: downsample the raw input, run it through the
            # previous resolution's from-RGB layer, and blend with the new
            # block's output. `prev_from_rgb` is bound at the end of the
            # prior iteration, so this branch never runs on the first pass.
            x_ds = layers.AveragePooling2D(name="downsample_%dx%d" %
                                           (2**res, 2**res))(x0)
            x_ds = prev_from_rgb(x_ds)
            x = FadeIn(fade_in_alpha=fade_in_alpha,
                       name="fade_in_%dx%d" % (2**res, 2**res))([x_ds, x])
        # Apply all previously-built blocks, highest resolution first.
        for prev_d in disc_block_list[::-1]:
            x = prev_d(x)
        disc_block_list.append(curr_D_block)
        prev_from_rgb = curr_from_rgb
        mdl = Model(inputs=x0, outputs=x)
        model_list.append(mdl)
    return model_list
def d_block(x, n_filters, pool=True, use_bias=True, bn=L.Layer, act=L.ReLU):
    """Residual discriminator block: 1x1 projection skip plus 3-3-1 conv path."""
    # Projection shortcut.
    skip = L.Conv2D(n_filters, kernel_size=1, use_bias=use_bias, **common)(x)
    skip = bn()(skip)

    # Main path: two 3x3 conv stages (norm + activation each), then a 1x1.
    for _ in range(2):
        x = L.Conv2D(n_filters, kernel_size=3, use_bias=use_bias, **common)(x)
        x = bn()(x)
        x = act()(x)
    x = L.Conv2D(n_filters, kernel_size=1, **common)(x)

    # Merge with the shortcut, normalize, activate, optionally downsample.
    x = L.Add()([x, skip])
    x = bn()(x)
    x = act()(x)
    if pool:
        x = L.AveragePooling2D()(x)
    return x
def google_net(width, height):
    """GoogLeNet-style classifier over (width, height, 3) inputs."""
    normalizationEpsilon = 1e-6
    input = layers.Input(shape=(width, height, 3))

    # Stem: 7x7/2 conv, pooling, normalization, 1x1 + 3x3 convs.
    conv_7x7_2_1 = layers.Conv2D(64, (7, 7), activation='relu', strides=2,
                                 padding='same')(input)
    max_3x3_2_1 = layers.MaxPooling2D((3, 3), strides=2,
                                      padding='same')(conv_7x7_2_1)
    norm_1 = layers.LayerNormalization(
        epsilon=normalizationEpsilon)(max_3x3_2_1)
    conv_1x1_1_2 = layers.Conv2D(64, (1, 1), activation='relu')(norm_1)
    conv_3x3_1_2 = layers.Conv2D(192, (3, 3), activation='relu',
                                 padding='same')(conv_1x1_1_2)
    norm_2 = layers.LayerNormalization(
        epsilon=normalizationEpsilon)(conv_3x3_1_2)
    max_3x3_2_2 = layers.MaxPooling2D((3, 3), strides=2,
                                      padding='same')(norm_2)

    # Inception stacks 3, 4 and 5 with max pooling between them.
    inc_3a = getInception(64, 96, 128, 16, 32, 32, max_3x3_2_2)
    inc_3b = getInception(128, 128, 192, 32, 96, 64, inc_3a)
    max_3x3_2_3 = layers.MaxPooling2D((3, 3), strides=2,
                                      padding='same')(inc_3b)
    inc_4a = getInception(192, 96, 208, 16, 48, 64, max_3x3_2_3)
    inc_4b = getInception(160, 112, 224, 24, 64, 64, inc_4a)
    inc_4c = getInception(128, 128, 256, 24, 64, 64, inc_4b)
    inc_4d = getInception(112, 144, 288, 32, 64, 64, inc_4c)
    inc_4e = getInception(256, 160, 320, 32, 128, 128, inc_4d)
    max_3x3_2_4 = layers.MaxPooling2D((3, 3), strides=2,
                                      padding='same')(inc_4e)
    inc_5a = getInception(256, 160, 320, 32, 128, 128, max_3x3_2_4)
    inc_5b = getInception(384, 192, 384, 48, 128, 128, inc_5a)

    # Classification head.
    avg_6 = layers.AveragePooling2D((7, 7), padding='same')(inc_5b)
    # BUG FIX: dropout was applied to max_3x3_2_1 (an early stem feature
    # map), which bypassed the entire inception stack and left avg_6 unused;
    # the head must consume avg_6.
    dropout_6 = layers.Dropout(0.4)(avg_6)
    flatten = layers.Flatten()(dropout_6)
    fc_6 = layers.Dense(1000, activation='relu')(flatten)
    dropout_7 = layers.Dropout(0.4)(fc_6)
    fc_7 = layers.Dense(10, activation='softmax')(dropout_7)

    model = tf.keras.Model(inputs=input, outputs=fc_7, name='google_net')
    return model
def __init__(self, nc):
    """GoogLeNet (Inception v1)-style network with two auxiliary classifiers.

    Parameters
    ----------
    nc : int
        Number of output classes.
    """
    super(Inception, self).__init__()
    # Stem: 7x7/2 conv -> 3x3/2 max pool -> BN -> 1x1 conv -> 3x3 conv -> BN.
    self.conv7x7 = layers.Conv2D(64, kernel_size=7, strides=2,
                                 padding="same", activation="relu")
    self.maxPooling3x3 = layers.MaxPool2D(pool_size=3, strides=2,
                                          padding="same")
    self.batchNormal1 = layers.BatchNormalization()
    self.conv1x1 = layers.Conv2D(64, kernel_size=1, strides=1,
                                 padding="same", activation="relu")
    self.conv3x3 = layers.Conv2D(192, kernel_size=3, strides=1,
                                 padding="same", activation="relu")
    self.batchNormal2 = layers.BatchNormalization()
    # Inception blocks; each list gives the per-branch filter counts.
    self.inception3a = InceptionBlock([64, 96, 128, 16, 32, 32])
    self.inception3b = InceptionBlock([128, 128, 192, 32, 96, 64])
    self.inception4a = InceptionBlock([192, 96, 208, 16, 48, 64])
    self.inception4b = InceptionBlock([160, 112, 224, 24, 64, 64])
    self.inception4c = InceptionBlock([128, 128, 256, 24, 64, 64])
    self.inception4d = InceptionBlock([112, 144, 288, 32, 64, 64])
    self.inception4e = InceptionBlock([256, 160, 320, 32, 128, 128])
    self.inception5a = InceptionBlock([256, 160, 320, 32, 128, 128])
    self.inception5b = InceptionBlock([384, 192, 384, 48, 128, 128])
    # Head: 7x7 average pooling then a dense classifier.
    self.averagePooling = layers.AveragePooling2D(pool_size=7, strides=1)
    self.fc = layers.Dense(nc)
    # Auxiliary classifiers — presumably attached mid-network in call();
    # not visible here.
    self.classifier1 = Classifier(nc)
    self.classifier2 = Classifier(nc)
def __init__(self):
    """Inception-v4 block B: four parallel branches (pool, 1x1, factorized
    7x7 stack, double factorized 7x7 stack).
    """
    super(InceptionBlockB, self).__init__()
    # Branch 1: 3x3 average pooling followed by a 1x1 projection.
    self.b1_pool = layers.AveragePooling2D((3, 3), 1, "same")
    self.b1_conv = BasicConv2D(128, (1, 1), 1, "same")
    # Branch 2: plain 1x1 convolution.
    self.b2_conv = BasicConv2D(filters=384,
                               kernel_size=(1, 1),
                               strides=1,
                               padding="same")
    # Branch 3: 1x1 -> 1x7 -> 1x7.
    # NOTE(review): the Inception-v4 paper uses 1x7 then 7x1 here; the second
    # (1, 7) below may be a transcription slip — confirm before changing,
    # since altering the kernel changes weight shapes and breaks checkpoints.
    self.b3_conv1 = BasicConv2D(filters=192,
                                kernel_size=(1, 1),
                                strides=1,
                                padding="same")
    self.b3_conv2 = BasicConv2D(filters=224,
                                kernel_size=(1, 7),
                                strides=1,
                                padding="same")
    self.b3_conv3 = BasicConv2D(filters=256,
                                kernel_size=(1, 7),
                                strides=1,
                                padding="same")
    # Branch 4: 1x1 -> 1x7 -> 7x1 -> 1x7 -> 7x1.
    self.b4_conv1 = BasicConv2D(filters=192,
                                kernel_size=(1, 1),
                                strides=1,
                                padding="same")
    self.b4_conv2 = BasicConv2D(filters=192,
                                kernel_size=(1, 7),
                                strides=1,
                                padding="same")
    self.b4_conv3 = BasicConv2D(filters=224,
                                kernel_size=(7, 1),
                                strides=1,
                                padding="same")
    self.b4_conv4 = BasicConv2D(filters=224,
                                kernel_size=(1, 7),
                                strides=1,
                                padding="same")
    self.b4_conv5 = BasicConv2D(filters=256,
                                kernel_size=(7, 1),
                                strides=1,
                                padding="same")