def __init__(self, filters, dropout_rate, kernel_initializer, kernel_regularizer, name='bottleneck_composite_function'):
    """DenseNet-BC bottleneck composite function.

    Two BN -> ReLU -> Conv -> Dropout stages: a 1x1 "bottleneck" conv that
    widens to ``4 * filters`` channels, then a 3x3 conv producing ``filters``
    channels (the growth rate).
    """
    # Both convs share the same bias/init/regularizer settings.
    conv_kwargs = dict(
        use_bias=False,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer)
    bottleneck_stage = [
        L.BatchNormalization(),
        L.Activation(tf.nn.relu),
        # 1x1 conv widens to 4x the growth rate before the 3x3 conv.
        L.Conv2D(filters * 4, 1, **conv_kwargs),
        L.Dropout(dropout_rate),
    ]
    composite_stage = [
        L.BatchNormalization(),
        L.Activation(tf.nn.relu),
        L.Conv2D(filters, 3, padding='same', **conv_kwargs),
        L.Dropout(dropout_rate),
    ]
    super().__init__(bottleneck_stage + composite_stage, name=name)
def __init__(self, num_anchors, kernel_initializer, kernel_regularizer, name='classification_subnet'):
    """Box-regression head: four 3x3 conv/norm/relu stages, then a 3x3 conv
    emitting 4 box offsets per anchor.

    NOTE(review): the output width is ``num_anchors * 4`` (box offsets), so this
    is a regression head, yet the default ``name`` says 'classification_subnet'
    — looks copy-pasted from the classification head. Left unchanged here
    because renaming would alter variable scopes / checkpoint compatibility;
    confirm and rename deliberately.
    """
    super().__init__(name=name)
    self.num_anchors = num_anchors

    def make_stage():
        # One shared-width conv stage; no bias because normalization follows.
        return Sequential([
            L.Conv2D(256, 3, 1, padding='same', use_bias=False,
                     kernel_initializer=kernel_initializer,
                     kernel_regularizer=kernel_regularizer),
            L.Normalization(),
            L.Activation(tf.nn.relu),
        ])

    self.conv_pre = Sequential([make_stage() for _ in range(4)])
    # Output conv keeps its bias (no normalization afterwards).
    self.conv_out = L.Conv2D(
        num_anchors * 4, 3, 1, padding='same',
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer)
def __init__(self, filters, strides, expansion_factor, dropout_rate, kernel_initializer, kernel_regularizer, name='bottleneck'):
    """MobileNetV2-style inverted residual: 1x1 expansion -> 3x3 depthwise ->
    1x1 linear projection, each followed by BN (+ relu6 where nonlinear) and
    dropout.
    """
    super().__init__(name=name)
    conv_kwargs = dict(
        use_bias=False,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer)
    self.expand_conv = Sequential([
        L.Conv2D(
            filters * expansion_factor,  # FIXME: should be `input_shape[3].value * expansion_factor`
            1,
            **conv_kwargs),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu6),
        L.Dropout(dropout_rate),
    ])
    self.depthwise_conv = Sequential([
        L.DepthwiseConv2D(3, strides=strides, padding='same', **conv_kwargs),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu6),
        L.Dropout(dropout_rate),
    ])
    # Linear projection: deliberately no activation after BN.
    self.linear_conv = Sequential([
        L.Conv2D(filters, 1, **conv_kwargs),
        L.BatchNormalization(),
        L.Dropout(dropout_rate),
    ])
def __init__(self, kernel_initializer=None, kernel_regularizer=None, name='feature_pyramid_network'):
    """Feature Pyramid Network head: builds P5-P7 from C5 and merges C4/C3
    into P4/P3 via upsample-merge blocks.
    """
    if kernel_initializer is None:
        kernel_initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
    if kernel_regularizer is None:
        kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=1e-4)
    super().__init__(name=name)
    conv_kwargs = dict(
        use_bias=False,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer)
    # P6: stride-2 3x3 conv over C5.
    self.p6_from_c5 = Sequential([
        L.Conv2D(256, 3, 2, padding='same', **conv_kwargs),
        L.Normalization(),
    ])
    # P7: relu then another stride-2 3x3 conv over P6.
    self.p7_from_p6 = Sequential([
        L.Activation(tf.nn.relu),
        L.Conv2D(256, 3, 2, padding='same', **conv_kwargs),
        L.Normalization(),
    ])
    # P5: 1x1 lateral conv over C5.
    self.p5_from_c5 = Sequential([
        L.Conv2D(256, 1, 1, **conv_kwargs),
        L.Normalization(),
    ])
    self.p4_from_c4p5 = UpsampleMerge(
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='upsample_merge_c4p5')
    self.p3_from_c3p4 = UpsampleMerge(
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='upsample_merge_c3p4')
def __init__(self, num_anchors, num_classes, kernel_initializer, kernel_regularizer, name='classification_subnet'):
    """Classification head: four 3x3 conv/norm/relu stages, then a 3x3 conv
    emitting ``num_classes`` logits per anchor, with the RetinaNet
    prior-probability bias initialization on the output conv.
    """
    super().__init__(name=name)
    self.num_anchors = num_anchors
    self.num_classes = num_classes

    def make_stage():
        # One shared-width conv stage; no bias because normalization follows.
        return Sequential([
            L.Conv2D(256, 3, 1, padding='same', use_bias=False,
                     kernel_initializer=kernel_initializer,
                     kernel_regularizer=kernel_regularizer),
            L.Normalization(),
            L.Activation(tf.nn.relu),
        ])

    self.conv_pre = Sequential([make_stage() for _ in range(4)])
    # Initialize output bias so sigmoid(bias) == pi at the start of training
    # (keeps the focal loss stable early on).
    pi = 0.01
    bias_prior_initializer = tf.constant_initializer(-math.log((1 - pi) / pi))
    self.conv_out = L.Conv2D(
        num_anchors * num_classes, 3, 1, padding='same',
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_initializer=bias_prior_initializer)
def __init__(self, dropout_rate, kernel_initializer=None, kernel_regularizer=None, name='mobilenet_v2'):
    """MobileNetV2 backbone: stem conv, 17 inverted-residual bottlenecks in
    7 groups, and a 1x1 output conv.
    """
    if kernel_initializer is None:
        kernel_initializer = tf.contrib.layers.variance_scaling_initializer(
            factor=2.0, mode='FAN_IN', uniform=False)
    if kernel_regularizer is None:
        kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=4e-5)
    super().__init__(name=name)

    def make_bottleneck(filters, expansion_factor, strides):
        # Every bottleneck shares the same dropout/init/regularizer settings.
        return Bottleneck(
            filters,
            expansion_factor=expansion_factor,
            strides=strides,
            dropout_rate=dropout_rate,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)

    self.input_conv = Sequential([
        L.Conv2D(32, 3, strides=2, padding='same', use_bias=False,
                 kernel_initializer=kernel_initializer,
                 kernel_regularizer=kernel_regularizer),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu6),
        L.Dropout(dropout_rate),
    ])
    self.bottleneck_1_1 = make_bottleneck(16, expansion_factor=1, strides=1)
    self.bottleneck_2_1 = make_bottleneck(24, expansion_factor=6, strides=2)
    self.bottleneck_2_2 = make_bottleneck(24, expansion_factor=6, strides=1)
    self.bottleneck_3_1 = make_bottleneck(32, expansion_factor=6, strides=2)
    self.bottleneck_3_2 = make_bottleneck(32, expansion_factor=6, strides=1)
    self.bottleneck_3_3 = make_bottleneck(32, expansion_factor=6, strides=1)
    self.bottleneck_4_1 = make_bottleneck(64, expansion_factor=6, strides=2)
    self.bottleneck_4_2 = make_bottleneck(64, expansion_factor=6, strides=1)
    self.bottleneck_4_3 = make_bottleneck(64, expansion_factor=6, strides=1)
    self.bottleneck_4_4 = make_bottleneck(64, expansion_factor=6, strides=1)
    self.bottleneck_5_1 = make_bottleneck(96, expansion_factor=6, strides=1)
    self.bottleneck_5_2 = make_bottleneck(96, expansion_factor=6, strides=1)
    self.bottleneck_5_3 = make_bottleneck(96, expansion_factor=6, strides=1)
    self.bottleneck_6_1 = make_bottleneck(160, expansion_factor=6, strides=2)
    self.bottleneck_6_2 = make_bottleneck(160, expansion_factor=6, strides=1)
    self.bottleneck_6_3 = make_bottleneck(160, expansion_factor=6, strides=1)
    self.bottleneck_7_1 = make_bottleneck(320, expansion_factor=6, strides=1)
    # NOTE(review): the reference MobileNetV2 ends with a 1280-filter 1x1 conv;
    # 32 here may be a deliberate slimming for this project — confirm.
    self.output_conv = Sequential([
        L.Conv2D(32, 1, use_bias=False,
                 kernel_initializer=kernel_initializer,
                 kernel_regularizer=kernel_regularizer),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu6),
        L.Dropout(dropout_rate),
    ])
def __init__(self, filters, projection_type, kernel_initializer, kernel_regularizer, cardinality=32, name='resnext_bottleneck'):
    """ResNeXt bottleneck block: 1x1 reduce -> grouped 3x3 (``cardinality``
    parallel convs) -> 1x1 expand to ``filters * 4`` channels, with an
    identity path selected by ``projection_type``.

    Args:
        filters: base width; the block outputs ``filters * 4`` channels.
        projection_type: ProjectionType member — DOWN (strided 3x3 projection,
            also strides the grouped convs), CONV (1x1 projection), or NONE
            (plain identity, ``self.conv_identity = None``).
        kernel_initializer, kernel_regularizer: passed to every conv.
        cardinality: number of parallel groups in the 3x3 stage.
        name: layer name.

    Raises:
        ValueError: if ``filters`` is not divisible by ``cardinality`` or
            ``projection_type`` is not a ProjectionType member.
    """
    # `assert` is stripped under `python -O`; validate inputs explicitly.
    if filters % cardinality != 0:
        raise ValueError(
            'filters ({}) must be divisible by cardinality ({})'.format(filters, cardinality))
    if projection_type not in ProjectionType:
        raise ValueError('invalid projection_type: {!r}'.format(projection_type))
    super().__init__(name=name)
    # identity path
    if projection_type is ProjectionType.DOWN:
        self.conv_identity = Sequential([
            L.Conv2D(filters * 4, 3, 2, padding='same', use_bias=False,
                     kernel_initializer=kernel_initializer,
                     kernel_regularizer=kernel_regularizer),
            L.BatchNormalization()
        ])
    elif projection_type is ProjectionType.CONV:
        self.conv_identity = Sequential([
            L.Conv2D(filters * 4, 1, use_bias=False,
                     kernel_initializer=kernel_initializer,
                     kernel_regularizer=kernel_regularizer),
            L.BatchNormalization()
        ])
    elif projection_type is ProjectionType.NONE:
        self.conv_identity = None
    # conv_1: 1x1 reduction to the grouped width
    self.conv_1 = Sequential([
        L.Conv2D(filters * 2, 1, use_bias=False,
                 kernel_initializer=kernel_initializer,
                 kernel_regularizer=kernel_regularizer),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu)
    ])
    # conv_2: grouped 3x3 convolution, one conv per group (loop-invariant
    # strides/filters hoisted out of the loop).
    group_strides = 2 if projection_type is ProjectionType.DOWN else 1
    group_filters = (filters * 2) // cardinality
    self.conv_2 = [
        L.Conv2D(group_filters, 3, group_strides, padding='same', use_bias=False,
                 kernel_initializer=kernel_initializer,
                 kernel_regularizer=kernel_regularizer)
        for _ in range(cardinality)
    ]
    self.bn_2 = L.BatchNormalization()
    # conv_3: 1x1 expansion back to filters * 4 (no activation; the residual
    # add and relu presumably happen in call() — confirm).
    self.conv_3 = Sequential([
        L.Conv2D(filters * 4, 1, use_bias=False,
                 kernel_initializer=kernel_initializer,
                 kernel_regularizer=kernel_regularizer),
        L.BatchNormalization()
    ])
def __init__(self, blocks, growth_rate, compression_factor, bottleneck, dropout_rate, kernel_initializer, kernel_regularizer, name='densenet_bc_imagenet'):
    """DenseNet-BC (ImageNet layout): stem conv + max-pool, then four dense
    blocks separated by compressing transition layers.

    Args:
        blocks: per-dense-block layer counts, indexed 1..4 (index 0 unused).
        growth_rate: channels added by each dense layer (k).
        compression_factor: transition-layer compression (theta).
        bottleneck: whether dense layers use the bottleneck variant.
        dropout_rate, kernel_initializer, kernel_regularizer: passed through.
        name: layer name.
    """
    super().__init__(name=name)
    # The stem conv emits 2 * growth_rate channels; keep this in a named
    # variable because the transition-layer bookkeeping below depends on it.
    stem_filters = 2 * growth_rate
    self.conv_1 = Sequential([
        L.Conv2D(
            stem_filters,
            7,
            2,
            padding='same',
            use_bias=False,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='conv_1'),
        L.BatchNormalization(),
        L.Activation(tf.nn.relu)
    ])
    self.conv_1_max_pool = L.MaxPooling2D(3, 2, padding='same')
    self.dense_block_1 = DenseBlock(
        growth_rate,
        depth=blocks[1],
        bottleneck=bottleneck,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='dense_block_1')
    # Bug fix: this was hard-coded `+ 64`, which is only correct when
    # growth_rate == 32 — the stem actually emits 2 * growth_rate channels.
    self.transition_layer_1 = TransitionLayer(
        input_filters=blocks[1] * growth_rate + stem_filters,
        compression_factor=compression_factor,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='transition_layer_1')
    self.dense_block_2 = DenseBlock(
        growth_rate,
        depth=blocks[2],
        bottleneck=bottleneck,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='dense_block_2')
    self.transition_layer_2 = TransitionLayer(
        # FIXME: reaching into the previous transition layer's conv to recover
        # its output width is fragile — TransitionLayer should expose this
        # (e.g. an `output_filters` attribute). Left as-is because the exact
        # compression rounding lives inside TransitionLayer.
        input_filters=blocks[2] * growth_rate + self.transition_layer_1.layers[1].filters,
        compression_factor=compression_factor,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='transition_layer_2')
    self.dense_block_3 = DenseBlock(
        growth_rate,
        depth=blocks[3],
        bottleneck=bottleneck,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='dense_block_3')
    self.transition_layer_3 = TransitionLayer(
        # FIXME: same fragile reach-in as transition_layer_2 above.
        input_filters=blocks[3] * growth_rate + self.transition_layer_2.layers[1].filters,
        compression_factor=compression_factor,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='transition_layer_3')
    self.dense_block_4 = DenseBlock(
        growth_rate,
        depth=blocks[4],
        bottleneck=bottleneck,
        dropout_rate=dropout_rate,
        kernel_initializer=kernel_initializer,
        kernel_regularizer=kernel_regularizer,
        name='dense_block_4')