def generator_model(self):
    """Build the 3D DCGAN generator: latent vector -> (32, 64, 64, 3) volume.

    A dense projection is reshaped into a small (2, 4, 4) feature volume,
    then four stride-2 transposed-conv blocks double every spatial dimension,
    ending in a tanh-activated 3-channel output.
    """
    net = tf.keras.Sequential()

    # Linear block: project z and reshape to (2, 4, 4, crop_size * 8).
    net.add(kl.Dense(self.crop_size * 8 * 4 * 4 * 2,
                     input_shape=(self.z_dim,),
                     kernel_initializer=tf.keras.initializers.random_normal(stddev=0.01)))
    net.add(kl.Reshape((2, 4, 4, self.crop_size * 8)))
    net.add(kl.BatchNormalization())
    net.add(kl.ReLU())

    # Convolution blocks 1-3: upsample while shrinking the channel count.
    for channels in (self.crop_size * 4, self.crop_size * 2, self.crop_size):
        net.add(kl.Conv3DTranspose(filters=channels, kernel_size=4, strides=2,
                                   padding='same', kernel_initializer=self.conv_init,
                                   use_bias=True))
        net.add(kl.BatchNormalization())
        net.add(kl.ReLU())

    # Convolution block 4: final upsample to 3 channels, squashed to [-1, 1].
    net.add(kl.Conv3DTranspose(filters=3, kernel_size=4, strides=2,
                               padding='same', kernel_initializer=self.conv_init,
                               use_bias=True, activation='tanh'))
    return net
def __init__(self, atrous_rates, norm_layer, norm_kwargs, conv_trainable=True, **kwargs):
    """Atrous Spatial Pyramid Pooling: a 1x1 branch, three atrous 3x3
    branches, a global-pooling branch, then a 1x1 projection with dropout."""
    super(ASPP, self).__init__()
    out_channels = 256
    nkw = {} if norm_kwargs is None else norm_kwargs

    # Branch 0: plain 1x1 convolution.
    self.b0 = tf.keras.Sequential([
        klayers.Conv2D(out_channels, kernel_size=1, kernel_initializer='he_uniform',
                       use_bias=False, trainable=conv_trainable),
        norm_layer(**nkw),
        klayers.ReLU(),
    ])

    # Branches 1-3: dilated 3x3 convolutions, one per atrous rate.
    rate1, rate2, rate3 = tuple(atrous_rates)
    self.b1 = ASPPConv(out_channels, rate1, norm_layer, norm_kwargs,
                       conv_trainable=conv_trainable)
    self.b2 = ASPPConv(out_channels, rate2, norm_layer, norm_kwargs,
                       conv_trainable=conv_trainable)
    self.b3 = ASPPConv(out_channels, rate3, norm_layer, norm_kwargs,
                       conv_trainable=conv_trainable)

    # Branch 4: image-level (global average pooling) context.
    self.b4 = ASPPPooling(out_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                          conv_trainable=conv_trainable)

    self.concat = klayers.Concatenate()

    # Fuse the five concatenated branches back down to out_channels.
    self.project = tf.keras.Sequential([
        klayers.Conv2D(out_channels, kernel_size=1, kernel_initializer='he_uniform',
                       use_bias=False, trainable=conv_trainable),
        norm_layer(**nkw),
        klayers.ReLU(),
        klayers.Dropout(0.5),
    ])
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1,
                          strides=(1, 1), block_id=1):
    """MobileNet depthwise-separable block: 3x3 depthwise conv followed by a
    1x1 pointwise conv, each with batch norm and ReLU6.

    `alpha` scales the pointwise filter count; `block_id` numbers the layers.
    """
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    pointwise_conv_filters = int(pointwise_conv_filters * alpha)
    strided = strides != (1, 1)

    # When striding, pad only bottom/right and convolve 'valid' so output
    # sizes match TF 'same' semantics.
    if strided:
        x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(inputs)
    else:
        x = inputs

    x = layers.DepthwiseConv2D((3, 3),
                               padding='valid' if strided else 'same',
                               depth_multiplier=depth_multiplier,
                               strides=strides,
                               use_bias=False,
                               name='conv_dw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)

    # 1x1 pointwise convolution mixes channels.
    x = layers.Conv2D(pointwise_conv_filters, (1, 1),
                      padding='same', use_bias=False, strides=(1, 1),
                      name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=channel_axis, name='conv_pw_%d_bn' % block_id)(x)
    return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id):
    """MobileNetV2 inverted residual: expand (1x1) -> depthwise (3x3) ->
    project (1x1), with a residual add when shapes allow.

    Args:
        inputs: input tensor.
        expansion: channel expansion factor for the 1x1 expand conv.
        stride: depthwise conv stride (1 or 2).
        alpha: width multiplier applied to the projected filter count.
        filters: base number of output filters.
        block_id: block index; 0 means the first block, which skips expansion.

    Returns:
        Output tensor; `inputs + x` when stride == 1 and the channel counts
        match, otherwise just the projected tensor.
    """
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    in_channels = backend.int_shape(inputs)[channel_axis]
    pointwise_conv_filters = int(filters * alpha)
    # Round the projected width to a multiple of 8 (hardware-friendly).
    pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
    x = inputs
    prefix = 'block_{}_'.format(block_id)
    if block_id:
        # Expand: 1x1 conv widens channels by `expansion` before depthwise.
        x = layers.Conv2D(round(expansion * in_channels), kernel_size=1, padding='same',
                          use_bias=False, activation=None,
                          kernel_regularizer=regularizers.l2(l2_reg),
                          name=prefix + 'expand')(x)
        x = layers.BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999,
                                      name=prefix + 'expand_BN')(x)
        x = layers.ReLU(6., name=prefix + 'expand_relu')(x)
    else:
        # First block has no expansion stage and uses a distinct name prefix.
        prefix = 'expanded_conv_'
    # Depthwise: pad explicitly for stride 2 so the 'valid' conv matches
    # 'same' output sizes; `correct_pad` computes the asymmetric padding.
    if stride == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(backend, x, 3),
                                 name=prefix + 'pad')(x)
    x = layers.DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
                               use_bias=False,
                               depthwise_regularizer=regularizers.l2(l2_reg),
                               padding='same' if stride == 1 else 'valid',
                               name=prefix + 'depthwise')(x)
    x = layers.BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999,
                                  name=prefix + 'depthwise_BN')(x)
    x = layers.ReLU(6., name=prefix + 'depthwise_relu')(x)
    # Project: linear 1x1 bottleneck (no activation after the BN).
    x = layers.Conv2D(pointwise_filters, kernel_size=1, padding='same', use_bias=False,
                      activation=None, kernel_regularizer=regularizers.l2(l2_reg),
                      name=prefix + 'project')(x)
    x = layers.BatchNormalization(axis=channel_axis, epsilon=1e-3, momentum=0.999,
                                  name=prefix + 'project_BN')(x)
    # Residual connection only when spatial and channel dims are unchanged.
    if in_channels == pointwise_filters and stride == 1:
        return layers.Add(name=prefix + 'add')([inputs, x])
    return x
def reconstructor(input_shape=(28, 28, 1)):
    """Convolutional autoencoder for 28x28 single-channel images.

    Encoder: three stride-2 convs (28 -> 14 -> 7 -> 4 spatially).
    Decoder: three stride-2 transposed convs back to 28x28, ending in tanh.
    """
    def enc_init():
        # Fresh TruncatedNormal initializer per encoder conv.
        return tf.keras.initializers.TruncatedNormal(stddev=0.02)

    def dec_init():
        # Fresh RandomNormal initializer per decoder conv.
        return tf.keras.initializers.RandomNormal(stddev=0.02)

    model = tf.keras.Sequential()

    # --- Encoder Block ---
    model.add(layers.Conv2D(32, (5, 5), strides=(2, 2), padding='same',
                            kernel_initializer=enc_init(),
                            input_shape=input_shape))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU(0.2))
    for n_filters in (64, 128):
        model.add(layers.Conv2D(n_filters, (5, 5), strides=(2, 2),
                                kernel_initializer=enc_init(), padding='same'))
        model.add(layers.BatchNormalization())
        model.add(layers.LeakyReLU(0.2))

    # --- Decoder Block ---
    # output_padding=(0, 0) makes the 4x4 map come out as 7x7 (not 8x8).
    model.add(layers.Conv2DTranspose(32, (5, 5), strides=(2, 2),
                                     output_padding=(0, 0), padding='same',
                                     kernel_initializer=dec_init()))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Conv2DTranspose(16, (5, 5), strides=(2, 2), padding='same',
                                     kernel_initializer=dec_init()))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same',
                                     kernel_initializer=dec_init(),
                                     activation='tanh'))
    return model
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
    """Generator network architecture (CycleGAN-style).

    Stem 7x7 conv, two stride-2 downsampling convs, `num_blocks` residual
    blocks, two stride-2 upsampling deconvs, and a final tanh 7x7 conv.
    """
    def init():
        # Fresh initializer instance per conv layer.
        return RandomNormal(mean=0, stddev=0.02)

    inputs = layers.Input(input_shape)

    # Stem: reflection-padded 7x7 conv.
    x = ReflectionPadding2D(padding=(3, 3))(inputs)
    x = layers.Conv2D(filters=64, kernel_size=7, strides=1,
                      kernel_initializer=init())(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # Downsample: 128 then 256 filters.
    for n_filters in (128, 256):
        x = layers.Conv2D(filters=n_filters, kernel_size=3, strides=2, padding='same',
                          kernel_initializer=init())(x)
        x = InstanceNormalization()(x)
        x = layers.ReLU()(x)

    # Residual trunk.
    for _ in range(num_blocks):
        x = _resblock(x)

    # Upsample: 128 then 64 filters.
    for n_filters in (128, 64):
        x = layers.Conv2DTranspose(filters=n_filters, kernel_size=3, strides=2,
                                   padding='same', kernel_initializer=init())(x)
        x = InstanceNormalization()(x)
        x = layers.ReLU()(x)

    # Final: reflection-padded 7x7 conv to RGB in [-1, 1].
    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh',
                      kernel_initializer=init())(x)
    return Model(inputs=inputs, outputs=x)
def generator_model():
    """MLP generator: 66-dim input -> 1024 -> 512 -> 256 -> 99 (tanh)."""
    model = keras.Sequential()
    model.add(layers.Dense(1024, input_shape=(66,)))
    model.add(layers.BatchNormalization())
    model.add(layers.ReLU())
    # Two more Dense + BN + ReLU stages with halving widths.
    for width in (512, 256):
        model.add(layers.Dense(width))
        model.add(layers.BatchNormalization())
        model.add(layers.ReLU())
    model.add(layers.Dense(99, activation='tanh'))
    return model
def upsample(units, input_shape=None, apply_dropout=False, layer_type='dense',
             output_padding=(1, 1)):
    """Build an upsampling stage: a Dense or stride-2 Conv2DTranspose core,
    then BatchNorm, optional Dropout(0.5), and ReLU.

    Raises:
        ValueError: if `layer_type` is neither 'dense' nor 'conv'.
    """
    initializer = random_normal_initializer(0., 0.02)

    # Pick the core layer first; everything after it is common.
    if layer_type == 'dense':
        core = layers.Dense(units,
                            input_shape=[input_shape, ],
                            kernel_initializer=initializer,
                            use_bias=False)
    elif layer_type == 'conv':
        core = layers.Conv2DTranspose(filters=units,
                                      kernel_size=3,
                                      strides=(2, 2),
                                      padding='same',
                                      input_shape=input_shape,
                                      kernel_initializer=initializer,
                                      use_bias=False,
                                      output_padding=output_padding)
    else:
        raise ValueError('wrong layer_type!')

    block = Sequential()
    block.add(core)
    block.add(layers.BatchNormalization())
    if apply_dropout:
        block.add(layers.Dropout(0.5))
    block.add(layers.ReLU())
    return block
def define_generator(latent_dim=50, nclasses=10):
    """Conditional generator (AC-GAN style) producing 28x28x1 images.

    The class label is embedded and mapped to a 7x7x1 plane, concatenated
    with a 7x7x384 projection of the noise, then upsampled twice.
    """
    # Label branch: embedding -> 7x7 single-channel plane.
    label = layers.Input(shape=(1, ))
    li = layers.Embedding(nclasses, 50)(label)
    li = layers.Dense(7 * 7 * 1, activation="relu")(li)
    li = layers.Reshape((7, 7, 1))(li)

    # Noise branch: dense projection -> 7x7x384 feature map.
    noise = layers.Input(shape=(latent_dim, ))
    n = layers.Dense(7 * 7 * 384, activation="relu")(noise)
    n = layers.Reshape((7, 7, 384))(n)

    merged = layers.concatenate([n, li], axis=-1)

    # 7x7 -> 14x14 -> 28x28 via stride-2 transposed convs.
    x = layers.Conv2DTranspose(filters=192, kernel_size=5, strides=2,
                               padding="same")(merged)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2DTranspose(filters=1, kernel_size=5, strides=2,
                               padding="same", activation="tanh")(x)
    return tf.keras.Model([noise, label], x)
def bottleneck_v1(inputs, filters, stage, block, use_bias=True, init_strides=(1, 1),
                  is_first_block_of_first_layer=False,
                  kernel_initializer=initializers.he_normal(),
                  kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
                  bn_axis=-1, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON):
    """ResNet-v1 bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, merged with a
    shortcut and finished with ReLU.

    `filters` may be a single int (expanded to f, f, 4f) or an explicit
    3-tuple of per-stage filter counts.
    """
    if isinstance(filters, int):
        filter1, filter2, filter3 = filters, filters, 4 * filters
    else:
        filter1, filter2, filter3 = filters

    base_name = str(stage) + block + '_branch'
    # Arguments shared by every conv/bn stage in this block.
    shared = dict(use_bias=use_bias,
                  kernel_initializer=kernel_initializer,
                  kernel_regularizer=kernel_regularizer,
                  bn_axis=bn_axis,
                  momentum=momentum,
                  epsilon=epsilon)

    x = conv_bn_relu(inputs, filters=filter1, kernel_size=(1, 1),
                     strides=init_strides, name=base_name + '2a', **shared)
    x = conv_bn_relu(x, filters=filter2, kernel_size=(3, 3),
                     strides=(1, 1), name=base_name + '2b', **shared)
    # No ReLU here: the activation comes after the shortcut addition.
    x = conv_bn(x, filters=filter3, kernel_size=(1, 1),
                strides=(1, 1), name=base_name + '2c', **shared)
    x = shortcut_v1(inputs, x, name=base_name + '1', **shared)
    return layers.ReLU()(x)
def get_activation_layer(x, activation, name="activ"):
    """Apply an activation given as a factory function, a name string, or a
    callable.

    Parameters
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    activation : function or str
        A zero-arg factory returning a layer, an activation name
        ('relu', 'relu6', 'swish'), or a callable applied directly to `x`.
    name : str, default 'activ'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulted tensor/variable/symbol.
    """
    assert (activation is not None)
    if isfunction(activation):
        # Factory: instantiate, then apply.
        return activation()(x)
    if isinstance(activation, str):
        if activation == "relu":
            return nn.Activation("relu", name=name)(x)
        if activation == "relu6":
            return nn.ReLU(max_value=6.0, name=name)(x)
        if activation == "swish":
            return swish(x=x, name=name)
        raise NotImplementedError()
    # Already a layer/callable: apply directly.
    return activation(x)
def __init__(self, nfilters, nsublayer, **kwargs):
    """Chained Residual Pooling block: `nsublayer` pairs of 3x3 conv and
    5x5 stride-1 max-pool (channels_first), with one shared ReLU."""
    super(CRPBlock, self).__init__(**kwargs)
    for idx in range(1, nsublayer + 1):
        setattr(self, "conv%d" % idx, conv3x3(nfilters))
        setattr(self, "pool%d" % idx,
                layers.MaxPooling2D(5, 1, "same", "channels_first"))
    self.relu = layers.ReLU()
    self.nsublayer = nsublayer
def __init__(self, output_channels, kernel_size, strides=(1, 1), padding='valid',
             weight_decay=5e-4, kernel_initializer=initializers.glorot_uniform,
             batch_norm_decay=0.995, batch_norm_epsilon=0.001, name=None):
    """Conv2D (no bias) + BatchNormalization + ReLU building block.

    Args:
        output_channels: number of conv filters.
        kernel_size: conv kernel size.
        strides: conv strides.
        padding: conv padding mode.
        weight_decay: L2 factor; <= 0 disables the kernel regularizer.
        kernel_initializer: conv kernel initializer.
        batch_norm_decay: BN momentum.
        batch_norm_epsilon: BN epsilon.
        name: layer name passed to both the conv and the BN layer.
    """
    super(BaseConvBlock, self).__init__()
    # NOTE(review): the default `kernel_initializer` is the glorot_uniform
    # CLASS, not an instance or the 'glorot_uniform' string — confirm the
    # installed Keras version accepts a class here.
    # NOTE(review): `name` is given to both the conv and the BN layer below;
    # confirm duplicate layer names are acceptable in this model.
    self.conv = layers.Conv2D(filters=output_channels,
                              kernel_size=kernel_size,
                              strides=strides,
                              padding=padding,
                              kernel_initializer=kernel_initializer,
                              # Regularize only when a positive decay is given.
                              kernel_regularizer=regularizers.l2(weight_decay)
                              if weight_decay > 0 else None,
                              use_bias=False,
                              name=name)
    self.norm = layers.BatchNormalization(axis=-1,
                                          momentum=batch_norm_decay,
                                          epsilon=batch_norm_epsilon,
                                          name=name)
    self.activation = layers.ReLU()
def RNN(dim, x):
    """Simple ReLU recurrent cell unrolled over the time axis.

    h_t = relu(Wx @ x_t + Wh @ h_{t-1}), with h_0 = 0.

    Returns:
        (h, H): the final hidden state and the stacked per-step states
        with the time axis restored at axis 1.
    """
    # Learnable weights in the cell.
    Wh = layers.Dense(dim, use_bias=False)
    Wx = layers.Dense(dim)

    # Unstack the time axis into a list of per-step tensors.
    steps = tf.unstack(x, axis=1)
    relu = layers.ReLU()

    # Zero initial state, shaped like one projected step.
    h = tf.zeros_like(Wx(steps[0]))
    outputs = []
    for step in steps:
        h = relu(Wx(step) + Wh(h))
        outputs.append(h)

    return h, tf.stack(outputs, axis=1)
def create_functional_model():
    """Small functional ResNet-style test model: one residual block whose
    shortcut is a 1x1 stride-2 conv, topped with a softmax Dense head."""
    img_input = layers.Input(name='input', shape=(None, None, 3), dtype='float32')
    stem = layers.Conv2D(filters=16, kernel_size=(3, 3), strides=1,
                         padding="same", activation='relu')(img_input)

    # Projection shortcut matches the main path's stride-2 downsampling.
    shortcut = layers.Conv2D(filters=64, kernel_size=(1, 1), strides=2)(stem)
    shortcut = layers.BatchNormalization()(shortcut)

    # Main path: two 3x3 convs with BN; ReLU only between them.
    main = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2,
                         padding="same")(stem)
    main = layers.BatchNormalization()(main)
    main = layers.ReLU()(main)
    main = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=1,
                         padding="same")(main)
    main = layers.BatchNormalization()(main)

    merged = tf.keras.layers.Add()([shortcut, main])
    out = layers.Dense(units=10, activation='softmax')(merged)
    return models.Model(img_input, out, name='ResnetBlockTest')
def __init__(self, nfilters, npath=2, nsublayer=2, **kwargs):
    """Residual Conv Unit block: an npath x nsublayer grid of 3x3 convs
    (attributes conv{path}_{sub}) sharing one ReLU."""
    super(RCUBlock, self).__init__(**kwargs)
    for p in range(1, npath + 1):
        for s in range(1, nsublayer + 1):
            setattr(self, "conv%d_%d" % (p, s), conv3x3(nfilters))
    self.relu = layers.ReLU()
    self.npath = npath
    self.nsublayer = nsublayer
def __init__(self, block, nfilters, nblocks, **kwargs):
    """RefineNet backbone stem: 7x7/2 conv (channels_first) + BN + ReLU,
    then three ResNet stages built by _make_resnet_block."""
    super(RefineNet, self).__init__(**kwargs)
    self.conv1 = layers.Conv2D(64, 7, 2, "same", "channels_first", use_bias=False)
    self.bn1 = layers.BatchNormalization()
    self.relu1 = layers.ReLU()
    # Stage 1 keeps resolution; stages 2 and 3 downsample by 2.
    for idx, stage_stride in enumerate((1, 2, 2), start=1):
        setattr(self, "resnet_block%d" % idx,
                self._make_resnet_block(nfilters[idx - 1], nblocks[idx - 1],
                                        stride=stage_stride))
def testReturnsProvider_LayerWithResultQuantizer(self):
    """ReLU produces a quantizable result, so the registry's provider must
    expose exactly one output (activation) quantizer for it."""
    relu_layer = l.ReLU()

    provider = self.quantize_registry.get_quantize_provider(relu_layer)
    quantizers = provider.get_output_quantizers(relu_layer)

    self.assertLen(quantizers, 1)
    self._assert_activation_quantizers(quantizers)
def __init__(self):
    """Generator: 74-dim input -> two Dense+BN+ReLU stages -> 7x7x128 map ->
    two stride-2 transposed convs -> 28x28x1 sigmoid output.

    Layers are only declared here; presumably they are wired together in
    this model's call() — confirm against the rest of the class.
    """
    super().__init__(name='pix2pix_generator')
    # Fully-connected stages (input_shape here is informational for the
    # first layer; the model is subclassed, not Sequential).
    self.dense_1 = layers.Dense(1024, input_shape=(74, ))
    self.bn_1 = layers.BatchNormalization()
    self.relu_1 = layers.ReLU()
    self.dense_2 = layers.Dense(128 * 7 * 7)
    self.bn_2 = layers.BatchNormalization()
    self.relu_2 = layers.ReLU()
    # Reshape the 6272-dim vector into a 7x7x128 feature map.
    self.reshape = layers.Reshape((7, 7, 128))
    # Upsampling head: 7x7 -> 14x14 -> 28x28.
    self.convT_1 = layers.Conv2DTranspose(64, 4, 2, padding='same')
    self.bn_3 = layers.BatchNormalization()
    self.relu_3 = layers.ReLU()
    # Final single-channel image in [0, 1].
    self.convT_2 = layers.Conv2DTranspose(1, 4, 2, padding='same',
                                          activation='sigmoid')
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Initial MobileNet convolution block: explicit bottom/right zero
    padding, a 'valid' convolution, batch normalization, and ReLU6.

    # Arguments
        inputs: 4D input tensor, channels_last `(rows, cols, 3)` or
            channels_first `(3, rows, cols)`; width/height >= 32.
        filters: base number of output filters, scaled by `alpha`.
        alpha: width multiplier; < 1 shrinks and > 1 grows the filter
            count proportionally (1 keeps the paper's defaults).
        kernel: convolution window size (int or tuple of 2 ints).
        strides: convolution strides; any value != 1 is incompatible
            with `dilation_rate` != 1.

    # Returns
        Output tensor of the block; spatial dims may shrink with stride.
    """
    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    filters = int(filters * alpha)
    # Pad only bottom/right so the stride-2 'valid' conv reproduces
    # TF 'same' output sizes.
    x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
    x = layers.Conv2D(filters, kernel,
                      padding='valid',
                      use_bias=False,
                      strides=strides,
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
    return layers.ReLU(6., name='conv1_relu')(x)
def testQuantizesOutputsFromLayer(self):
    """A QuantizeWrapper around ReLU must fake-quantize the layer output
    into the [-6, 6] activation range (8 bits, wide range)."""
    # TODO(pulkitb): Increase coverage by adding other output quantize layers
    # such as AveragePooling etc.
    relu_layer = layers.ReLU()
    provider = self.quantize_registry.get_quantize_provider(relu_layer)

    quantized_model = keras.Sequential([
        QuantizeWrapper(layers.ReLU(), quantize_provider=provider)
    ])
    baseline_model = keras.Sequential([layers.ReLU()])

    inputs = np.random.rand(1, 2, 1)
    expected_output = tf.quantization.fake_quant_with_min_max_vars(
        baseline_model.predict(inputs), -6.0, 6.0, num_bits=8,
        narrow_range=False)

    self.assertAllClose(expected_output, quantized_model.predict(inputs))
def build_generator(self):
    """DCGAN-style generator: a 1x1 latent map is expanded to 4x4, then
    repeatedly doubled in resolution (halving the width multiplier) until
    it reaches image_size, finishing with a tanh RGB layer."""
    size = self.image_size[0]
    m = size // 8

    x = inputs = layers.Input((1, 1, self.z_dim))
    # 1x1 -> 4x4 with a 'valid' 4x4 transposed conv.
    x = ops.UpConv2D(size // 2 * m, 4, 1, 'valid')(x)
    x = ops.BatchNorm()(x)
    x = layers.ReLU()(x)

    # Each pass doubles the spatial size and halves the multiplier.
    while m > 1:
        x = ops.UpConv2D(size // 2 * (m // 2))(x)
        x = ops.BatchNorm()(x)
        x = layers.ReLU()(x)
        m //= 2

    x = ops.UpConv2D(3)(x)
    x = layers.Activation('tanh')(x)
    return models.Model(inputs, x, name='Generator')
def __init__(self, out_channels, norm_layer, norm_kwargs, conv_trainable=True, **kwargs):
    """Image-level pooling branch of ASPP: global average pool, restore the
    two collapsed spatial axes, then 1x1 conv + norm + ReLU."""
    super(ASPPPooling, self).__init__()
    branch = [
        klayers.GlobalAveragePooling2D(),
        # GAP drops both spatial axes; re-insert them as size-1 dims.
        klayers.Lambda(lambda t: tf.keras.backend.expand_dims(t, 1)),
        klayers.Lambda(lambda t: tf.keras.backend.expand_dims(t, 1)),
        klayers.Conv2D(out_channels, kernel_size=1,
                       kernel_initializer='he_uniform', use_bias=False,
                       trainable=conv_trainable),
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)),
        klayers.ReLU(),
    ]
    self.gap = tf.keras.Sequential(branch)
def bn_relu(inputs, axis=-1, momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON,
            bn_name=None):
    """Apply BatchNormalization (optionally named `bn_name`) then ReLU."""
    normed = layers.BatchNormalization(axis=axis,
                                       momentum=momentum,
                                       epsilon=epsilon,
                                       name=bn_name)(inputs)
    return layers.ReLU()(normed)
def discriminator_and_classifier_model(c_dim):
    """InfoGAN-style discriminator/classifier pair over a 99-dim input.

    A shared Dense trunk (1024 -> 512 -> 256 with BN+ReLU) feeds two heads:
    a single-logit discriminator head, and a classifier (Q) head that adds
    one more Dense+LeakyReLU stage before emitting `c_dim` logits.

    Args:
        c_dim: number of classifier output units.

    Returns:
        (discriminator, classifier): two keras.Model instances sharing the
        same input tensor and trunk layers.
    """
    inputs = keras.Input(shape=(99,))
    # Fix: the original passed input_shape=(64,) to this Dense layer; in the
    # functional API that argument is ignored, and it contradicted the actual
    # 99-dim input above, so it has been removed.
    x = layers.Dense(1024)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(512)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(256)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)

    # Discriminator head: single real/fake logit.
    d_out = layers.Dense(1)(x)

    # Classifier (Q) head: extra Dense + LeakyReLU, then c_dim logits.
    q = layers.Dense(256)(x)
    q = layers.LeakyReLU()(q)
    q_out = layers.Dense(c_dim)(q)

    return (keras.Model(inputs=inputs, outputs=d_out),
            keras.Model(inputs=inputs, outputs=q_out))
def __init__(self, nclass, norm_layer=None, norm_kwargs=None, conv_trainable=True,
             **kwargs):
    """DeepLabV3 head: ASPP with atrous rates (12, 24, 36) followed by a
    3x3 conv + norm + ReLU + dropout and a 1x1 classifier to `nclass`."""
    super(DeepLabHead, self).__init__()
    self.aspp = ASPP([12, 24, 36], norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                     conv_trainable=conv_trainable, **kwargs)
    head_layers = [
        klayers.Conv2D(256, kernel_size=3, padding='same',
                       kernel_initializer='he_uniform', use_bias=False,
                       trainable=conv_trainable),
        norm_layer(**({} if norm_kwargs is None else norm_kwargs)),
        klayers.ReLU(),
        klayers.Dropout(0.1),
        # Final per-pixel classifier (has a bias, no norm/activation).
        klayers.Conv2D(nclass, kernel_initializer='he_uniform', kernel_size=1,
                       trainable=conv_trainable),
    ]
    self.block = tf.keras.Sequential(head_layers)
def decoder_block(a, n_filters):
    """Upsample `a` by 2x with a 4x4 transposed conv (L1/L2-regularized from
    Config), then batch norm and ReLU."""
    reg = regularizers.l1_l2(l1=Config.l1_kernel_regularization,
                             l2=Config.l2_kernel_regularization)
    out = layers.Conv2DTranspose(filters=n_filters,
                                 kernel_size=(4, 4),
                                 padding='same',
                                 strides=(2, 2),
                                 kernel_regularizer=reg)(a)
    out = layers.BatchNormalization()(out)
    return layers.ReLU()(out)
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
    """Initial MobileNet conv block ('same'-padding variant): Conv2D + BN +
    ReLU6, with the filter count scaled by the width multiplier `alpha`."""
    bn_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    scaled_filters = int(filters * alpha)
    x = layers.Conv2D(scaled_filters, kernel,
                      padding='same',
                      use_bias=False,
                      strides=strides,
                      name='conv1')(inputs)
    x = layers.BatchNormalization(axis=bn_axis, name='conv1_bn')(x)
    return layers.ReLU(6., name='conv1_relu')(x)
def _resblock(x0, num_filter=256, kernel_size=3):
    """Reflection-padded residual block: two conv + instance-norm stages
    (ReLU after the first only), added back onto the input."""
    def pad_conv(t):
        # Reflection-pad so the conv preserves spatial size, then convolve.
        t = ReflectionPadding2D()(t)
        return layers.Conv2D(filters=num_filter, kernel_size=kernel_size,
                             kernel_initializer=RandomNormal(mean=0, stddev=0.02))(t)

    y = pad_conv(x0)
    y = InstanceNormalization()(y)
    y = layers.ReLU()(y)
    y = pad_conv(y)
    y = InstanceNormalization()(y)
    return layers.Add()([y, x0])
def __init__(self, block, layer_sizes, width_per_group=64,
             replace_stride_with_dilation=None):
    """ResNet-style backbone constructor.

    Args:
        block: residual block type passed through to _make_layer.
        layer_sizes: number of blocks in each of the four stages.
        width_per_group: bottleneck group width.
        replace_stride_with_dilation: optional 3 flags, one per stage 2-4,
            selecting dilation instead of stride-2 downsampling.

    Raises:
        ValueError: if replace_stride_with_dilation is given but is not
            3 elements long.

    NOTE(review): self.inplanes / self.dilation are running state that
    _make_layer presumably reads and updates — the call order of the
    stages below matters; confirm before reordering.
    NOTE(review): no super().__init__() call here — confirm the base class
    (if any) does not require one.
    """
    self.inplanes = 64
    self.dilation = 1
    if replace_stride_with_dilation is None:
        # each element in the tuple indicates if we should replace
        # the 2x2 stride with a dilated convolution instead
        replace_stride_with_dilation = [False, False, False]
    if len(replace_stride_with_dilation) != 3:
        raise ValueError("replace_stride_with_dilation should be None "
                         "or a 3-element tuple, got {}".format(
                             replace_stride_with_dilation))
    self.base_width = width_per_group
    # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool, grouped as 'layer0'.
    conv1 = layers.Conv2D(self.inplanes, (7, 7), strides=(2, 2), padding='same',
                          use_bias=False)
    bn1 = layers.BatchNormalization()
    relu = layers.ReLU()
    maxpool = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')
    self.layer0 = Sequential(layers=[conv1, bn1, relu, maxpool], name='layer0')
    # Four residual stages; stages 2-4 downsample (or dilate) by 2.
    self.layer1 = self._make_layer('layer1', block, 64, layer_sizes[0])
    self.layer2 = self._make_layer('layer2', block, 128, layer_sizes[1], stride=2,
                                   dilate=replace_stride_with_dilation[0])
    self.layer3 = self._make_layer('layer3', block, 256, layer_sizes[2], stride=2,
                                   dilate=replace_stride_with_dilation[1])
    self.layer4 = self._make_layer('layer4', block, 512, layer_sizes[3], stride=2,
                                   dilate=replace_stride_with_dilation[2])