Example #1
    def build(self, input_shape):
        self.kernel = []
        if self.conn_type == "S":
            # scale-only parameter
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
        elif self.conn_type == "B":
            # bias-only parameter
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "S+B" or self.conn_type == "B+S":
            # scale and bias parameters
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
            self.kernel.append(
                self.add_weight("CrowdLayer", (1, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        else:
            raise Exception(
                "Unknown connection type for CrowdsRegression layer!")

        super(CrowdsRegression,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #2
def get_generator_unet(n_block=3):
    input = Input(shape=(image_size, image_size, input_channel))
    # encoder
    e0 = Conv2D(64, kernel_size=4, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(input)  # use reflection padding instead
    e0 = BatchNormalization()(e0)
    e0 = Activation('relu')(e0)
    e1 = conv_block(e0, 128, downsample=True, dropout=False)  # 1/2
    e2 = conv_block(e1, 256, downsample=True, dropout=False)  # 1/4
    e3 = conv_block(e2, 512, downsample=True, dropout=False)  # 1/8
    e4 = conv_block(e3, 512, downsample=True, dropout=False)  # 1/16
    e5 = conv_block(e4, 512, downsample=True, dropout=False)  # 1/32
    e6 = conv_block(e5, 512, downsample=True, dropout=False)  # 1/64
    e7 = conv_block(e6, 512, downsample=True, dropout=False)  # 1/128
    # decoder
    d0 = conv_block(e7, 512, downsample=False, dropout=True)  # 1/64
    d1 = Concatenate(axis=-1)([d0, e6])
    d1 = conv_block(d1, 512, downsample=False, dropout=True)  # 1/32
    d2 = Concatenate(axis=-1)([d1, e5])
    d2 = conv_block(d2, 512, downsample=False, dropout=True)  # 1/16
    d3 = Concatenate(axis=-1)([d2, e4])
    d3 = conv_block(d3, 512, downsample=False, dropout=True)  # 1/8
    d4 = Concatenate(axis=-1)([d3, e3])
    d4 = conv_block(d4, 256, downsample=False, dropout=True)  # 1/4
    d5 = Concatenate(axis=-1)([d4, e2])
    d5 = conv_block(d5, 128, downsample=False, dropout=True)  # 1/2
    d6 = Concatenate(axis=-1)([d5, e1])
    d6 = conv_block(d6, 64, downsample=False, dropout=True)  # 1
    # out
    x = Conv2D(output_channel, kernel_size=3, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(d6)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    generator = Model(inputs=input, outputs=x)
    return generator
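The U-Net generator above depends on module-level globals (image_size, input_channel, output_channel) and on the conv_block helper shown later on this page. A minimal usage sketch, assuming a hypothetical 256×256 RGB setup and the same Keras imports the snippet relies on:

# Hypothetical configuration; the original snippet defines these globals elsewhere.
image_size = 256
input_channel = 3
output_channel = 3

unet_generator = get_generator_unet()
unet_generator.summary()  # inspect the encoder/decoder layer stack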
Example #3
    def __init__(self,
                 embed_dim,
                 num_heads,
                 ff_dim,
                 dropout=0.1,
                 prenorm=False,
                 approximate_gelu=False):
        super(TransformerBlock, self).__init__()
        self.att = MultiHeadSelfAttention(embed_dim, num_heads)

        self.ffn = tf.keras.Sequential([
            Dense(ff_dim,
                  kernel_initializer=TruncatedNormal(mean=0.,
                                                     stddev=TRUNC_STD),
                  bias_initializer=Zeros()),
            tfa.layers.GELU(approximate=approximate_gelu),
            Dense(embed_dim,
                  kernel_initializer=TruncatedNormal(mean=0.,
                                                     stddev=TRUNC_STD),
                  bias_initializer=Zeros()),
        ])

        self.layernorm1 = LayerNormalization(epsilon=1e-6)
        self.layernorm2 = LayerNormalization(epsilon=1e-6)

        self.dropout1 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.prenorm = prenorm
Example #4
def get_generator(n_block=3):
    input = Input(shape=(image_size, image_size, input_channel))
    x = Conv2D(64, kernel_size=7, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(input)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # downsample
    x = Conv2D(128, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # downsample
    x = Conv2D(256, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    for i in range(n_block):
        x = residual_block(x)
    # upsample
    x = Conv2DTranspose(128, kernel_size=3, strides=2, padding='same',
                        kernel_initializer=RandomNormal(mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # upsample
    x = Conv2DTranspose(64, kernel_size=3, strides=2, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # out
    x = Conv2D(output_channel, kernel_size=7, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)  # use reflection padding instead
    x = BatchNormalization()(x)
    x = Activation('tanh')(x)
    generator = Model(inputs=input, outputs=x)
    return generator
Example #5
def conv_block(feature, out_channel, downsample=True, dropout=False):
    if downsample:
        x = Conv2D(out_channel, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(
            mean=0.0, stddev=0.02), bias_initializer=Zeros())(feature)
    else:
        x = Conv2DTranspose(out_channel, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(
            mean=0.0, stddev=0.02), bias_initializer=Zeros())(feature)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if dropout:
        x = Dropout(0.5)(x)
    return x
Example #6
def residual_block(feature, dropout=False):
    x = Conv2D(256, kernel_size=3, strides=1, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(feature)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    if dropout:
        x = Dropout(0.5)(x)
    x = Conv2D(256, kernel_size=3, strides=1, padding='same', kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Add()([feature, x])
Example #7
    def build(self, input_shape):
        input_size = input_shape[-1]
        hidden_units = [int(input_size)] + list(self.hidden_units)
        self.kernels = [
            self.add_weight(name='kernel' + str(i),
                            shape=(hidden_units[i], hidden_units[i + 1]),
                            initializer=glorot_normal(seed=self.seed),
                            regularizer=l2(self.l2_reg),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        self.bias = [
            self.add_weight(name='bias' + str(i),
                            shape=(self.hidden_units[i], ),
                            initializer=Zeros(),
                            trainable=True)
            for i in range(len(self.hidden_units))
        ]
        if self.use_bn:
            self.bn_layers = [
                tf.keras.layers.BatchNormalization()
                for _ in range(len(self.hidden_units))
            ]

        self.dropout_layers = [
            tf.keras.layers.Dropout(self.dropout_rate, seed=self.seed + i)
            for i in range(len(self.hidden_units))
        ]

        self.activation_layers = [
            activation_layer(self.activation)
            for _ in range(len(self.hidden_units))
        ]

        super(DNN, self).build(input_shape)  # Be sure to call this somewhere!
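The kernels, bias, and per-layer sublayers built here are consumed in the layer's call(), which the snippet does not show. A plausible forward pass pairing these weights (an illustrative sketch, not necessarily the library's exact code):

    def call(self, inputs, training=None):
        deep_input = inputs
        for i in range(len(self.hidden_units)):
            # affine transform with the i-th kernel/bias built above
            fc = tf.nn.bias_add(
                tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0)),
                self.bias[i])
            if self.use_bn:
                fc = self.bn_layers[i](fc, training=training)
            fc = self.activation_layers[i](fc)
            fc = self.dropout_layers[i](fc, training=training)
            deep_input = fc
        return deep_input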
Example #8
 def __init__(
         self,
         n_feat,
         n_hidden,
         out=1,
         name_idx=0,
         hidden_activation="sigmoid",
         output_activation="sigmoid",
         feat_weight_trainable=True,
         kernel_initializer=VarianceScaling(),
         bias_initializer=Zeros(),
         width=1.,  #Width of Softmin layer
         **kwargs):
     self.n_feat = n_feat
     self.n_hidden = n_hidden
     self.out = out
     self.name_idx = name_idx
     self.kernel_initializer = kernel_initializer
     self.bias_initializer = bias_initializer
     self.feat_weight_trainable = feat_weight_trainable
     self.hidden_activation = activations.get(hidden_activation)
     self.output_activation = activations.get(output_activation)
     self.width = width
     self.min_layer = Softmin(width=self.width)
     super(DenseAttentionwFeatWeights, self).__init__(**kwargs)
Example #9
def score_block(inputs, conv_type, num_classes, name, l2_reg=0.):
    """
	1x1 Convolution applied in FCN8 to compute class scores at different levels of the network.
	Parameters:
		inputs: Input tensor
		conv_type: 'ds' for depthwise separable convolutions, in other case standard convolution operation
		num_classes: Number of classes to classify
		name: Name of the layer. 'score32' or 'score16' or 'score8'
		l2_reg: l2 regularizer value
	"""
    #He initialization
    if name == 'score32':
        in_channels = inputs.get_shape().as_list()[-1]
        stddev = (2 / in_channels)**.5
    elif name == 'score16':
        stddev = 0.01
    elif name == 'score8':
        stddev = .001
    #w_initializer = TruncatedNormal(stddev=stddev)
    w_initializer = tf.keras.initializers.he_normal()
    b_initializer = Zeros()
    if conv_type == 'ds':
        x = SeparableConv2D(num_classes, 1, padding='same', strides=1, name=name,
                            depthwise_initializer=w_initializer,
                            pointwise_initializer=w_initializer,
                            bias_initializer=b_initializer,
                            pointwise_regularizer=l2(l2_reg),
                            depthwise_regularizer=l2(l2_reg))(inputs)
    else:
        x = Conv2D(num_classes, (1, 1), padding='same', strides=(1, 1), name=name,
                   kernel_initializer=w_initializer,
                   bias_initializer=b_initializer,
                   kernel_regularizer=l2(l2_reg))(inputs)
    return x
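A minimal call sketch for score_block, assuming a hypothetical backbone feature map and 21 classes; the depthwise-separable branch is selected with conv_type='ds':

# Hypothetical usage: 'pool4' stands in for any backbone feature map.
pool4 = Input(shape=(14, 14, 512))
scores16 = score_block(pool4, conv_type='ds', num_classes=21,
                       name='score16', l2_reg=1e-4)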
Example #10
    def build(self, input_shape):

        if not isinstance(input_shape, list) or len(input_shape) != 2:
            raise ValueError('A `LocalActivationUnit` layer should be called '
                             'on a list of 2 inputs')

        if len(input_shape[0]) != 3 or len(input_shape[1]) != 3:
            raise ValueError("Unexpected inputs dimensions %d and %d, expect to be 3 dimensions" % (
                len(input_shape[0]), len(input_shape[1])))

        if input_shape[0][-1] != input_shape[1][-1] or input_shape[0][1] != 1:
            raise ValueError('A `LocalActivationUnit` layer requires '
                             'two inputs with shapes (None,1,embedding_size) and (None,T,embedding_size). '
                             'Got shapes: %s,%s' % (input_shape[0], input_shape[1]))
        size = 4 * int(input_shape[0][-1]) if len(self.hidden_units) == 0 \
            else self.hidden_units[-1]
        self.kernel = self.add_weight(shape=(size, 1),
                                      initializer=glorot_normal(
                                          seed=self.seed),
                                      name="kernel")
        self.bias = self.add_weight(
            shape=(1,), initializer=Zeros(), name="bias")
        #self.dnn = DNN(self.hidden_units, self.activation, self.l2_reg,
        #               self.dropout_rate, self.use_bn, seed=self.seed)
        super(LocalActivationUnit, self).build(
            input_shape)  # Be sure to call this somewhere!
Example #11
    def _deconv(self, feature_map, f, k, s):
        """
        The deconvolution operation to upsample the average feature map downstream
        f = # of filters from previous leaky layer (int)
        k = size of kernel from previous leaky layer
        s = amount of stride from previous leaky layer
        """

        x = Input(shape=(None, None, 1))

        y = Conv2DTranspose(
            filters=1,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding='same',
            kernel_initializer=Ones(),  # set all weights to 1
            bias_initializer=Zeros()  # set all biases to 0
        )(x)

        deconv_model = Model(inputs=[x], outputs=[y])

        inps = [deconv_model.input, K.learning_phase()]  # input placeholder
        outs = [deconv_model.layers[-1].output]  # output placeholder

        deconv_func = K.function(inps, outs)  # evaluation function

        return deconv_func([feature_map, 0])[0]
Example #12
    def build(self, input_shape):
        super().build(input_shape)

        if self.use_bias:
            self.global_bias = self.add_weight(
                shape=(self.num_class,) if self.task == "multiclass" else (1,),
                initializer=Zeros(), name="global_bias")
Example #13
 def __init__(self, embed_dim, num_heads=8):
     super(MultiHeadSelfAttention, self).__init__()
     self.embed_dim = embed_dim
     self.num_heads = num_heads
     if embed_dim % num_heads != 0:
         raise ValueError(
             f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
         )
     self.projection_dim = embed_dim // num_heads
     self.query_dense = Dense(embed_dim,
                              kernel_initializer=TruncatedNormal(
                                  mean=0., stddev=TRUNC_STD),
                              use_bias=False)
     self.key_dense = Dense(embed_dim,
                            kernel_initializer=TruncatedNormal(
                                mean=0., stddev=TRUNC_STD),
                            use_bias=False)
     self.value_dense = Dense(embed_dim,
                              kernel_initializer=TruncatedNormal(
                                  mean=0., stddev=TRUNC_STD),
                              use_bias=False)
     self.combine_heads = Dense(embed_dim,
                                kernel_initializer=TruncatedNormal(
                                    mean=0., stddev=TRUNC_STD),
                                bias_initializer=Zeros())
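Only __init__ is shown; the projection layers above are typically paired with scaled dot-product attention in call(). An illustrative sketch of that pairing (not necessarily the snippet's exact implementation):

 def attention(self, query, key, value):
     score = tf.matmul(query, key, transpose_b=True)
     dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
     weights = tf.nn.softmax(score / tf.math.sqrt(dim_key), axis=-1)
     return tf.matmul(weights, value)

 def separate_heads(self, x, batch_size):
     # (batch, seq, embed_dim) -> (batch, num_heads, seq, projection_dim)
     x = tf.reshape(x, (batch_size, -1, self.num_heads, self.projection_dim))
     return tf.transpose(x, perm=[0, 2, 1, 3])

 def call(self, inputs):
     batch_size = tf.shape(inputs)[0]
     query = self.separate_heads(self.query_dense(inputs), batch_size)
     key = self.separate_heads(self.key_dense(inputs), batch_size)
     value = self.separate_heads(self.value_dense(inputs), batch_size)
     attention = self.attention(query, key, value)
     attention = tf.transpose(attention, perm=[0, 2, 1, 3])
     concat = tf.reshape(attention, (batch_size, -1, self.embed_dim))
     return self.combine_heads(concat)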
Example #14
    def build(self, input_shape):
        if self.conn_type == "MW":
            # matrix of weights per annotator
            self.kernel = self.add_weight(
                "CrowdLayer",
                (self.output_dim, self.output_dim, self.num_annotators),
                initializer=init_identities,
                trainable=True)
        elif self.conn_type == "VW":
            # vector of weights (one scale per class) per annotator
            self.kernel = self.add_weight(
                "CrowdLayer", (self.output_dim, self.num_annotators),
                initializer=Ones(),
                trainable=True)
        elif self.conn_type == "VB":
            # vector of biases (one bias per class) per annotator
            self.kernel = []
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "VW+B":
            # two vectors of weights (one scale and one bias per class) per annotator
            self.kernel = []
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Ones(),
                                trainable=True))
            self.kernel.append(
                self.add_weight("CrowdLayer",
                                (self.output_dim, self.num_annotators),
                                initializer=Zeros(),
                                trainable=True))
        elif self.conn_type == "SW":
            # single weight value per annotator
            self.kernel = self.add_weight("CrowdLayer",
                                          (self.num_annotators, 1),
                                          initializer=Ones(),
                                          trainable=True)
        else:
            raise Exception(
                "Unknown connection type for CrowdsClassification layer!")

        super(CrowdsClassification,
              self).build(input_shape)  # Be sure to call this somewhere!
Example #15
    def __init__(self,
                 n_attention,
                 n_attention_hidden,
                 n_attention_out,
                 n_feat,
                 n_hidden,
                 activation="sigmoid",
                 concat_activity_regularizer=None,
                 kernel_initializer=VarianceScaling(distribution="uniform"),
                 kernel_regularizer='l1',
                 bias_initializer=Zeros(),
                 bias_regularizer='l1',
                 attention_initializer=VarianceScaling(distribution="uniform"),
                 attention_hidden_activation="sigmoid",
                 attention_output_activation="sigmoid",
                 attention_trainable=True,
                 batch_norm_kwargs={},
                 **kwargs):
        self.n_attention = n_attention
        self.n_attention_hidden = n_attention_hidden
        self.n_attention_out = n_attention_out
        self.n_feat = n_feat
        self.n_hidden = n_hidden
        self.activation = activations.get(activation)
        self.concat_activity_regularizer = concat_activity_regularizer
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_initializer = bias_initializer
        self.bias_regularizer = bias_regularizer
        self.attention_initializer = attention_initializer
        self.attention_hidden_activation = attention_hidden_activation
        self.attention_output_activation = attention_output_activation
        self.attention_trainable = attention_trainable
        self.batch_norm_kwargs = batch_norm_kwargs

        self.attention_layers = []
        for i in range(self.n_attention):
            attention_layer = DenseAttention(
                n_feat=self.n_feat,
                n_hidden=self.n_attention_hidden,
                out=self.n_attention_out,
                hidden_activation=self.attention_hidden_activation,
                output_activation=self.attention_output_activation,
                kernel_initializer=self.attention_initializer,
                trainable=self.attention_trainable)
            self.attention_layers.append(attention_layer)
        self.concat_layer = Concatenate(
            activity_regularizer=self.concat_activity_regularizer)
        #Current (v3): Use Dense layer and batch normalization
        self.dense_layer = Dense(
            self.n_hidden,
            activation=None,  #Batch normalization before activation
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
        )
        self.batch_norm_layer = BatchNormalization(**batch_norm_kwargs)
        super(ConcatAttentions, self).__init__(**kwargs)
Example #16
    def build(self, input_shape):

        if self.use_bias:
            self.global_bias = self.add_weight(
                shape=(1,), initializer=Zeros(), name="global_bias")

        # Be sure to call this somewhere!
        super(PredictionLayer, self).build(input_shape)
Example #17
 def build(self, input_shape):
     self._g = self.add_weight(name='gain',
                               shape=(input_shape[-1], ),
                               initializer=Ones(),
                               trainable=True)
     self._b = self.add_weight(name='bias',
                               shape=(input_shape[-1], ),
                               initializer=Zeros(),
                               trainable=True)
Example #18
 def __init__(self):
     """
     Initialize multi-layer neural network
     """
     self.model = Sequential()
     self.loss = None
     self.metric = []
     self.optimizer = None
     self.initilizer = Zeros()
Example #19
 def build(self, input_shape):
     self.gamma = self.add_weight(name='gamma',
                                  shape=input_shape[-1:],
                                  initializer=Ones(),
                                  trainable=True)
     self.beta = self.add_weight(name='beta',
                                 shape=input_shape[-1:],
                                 initializer=Zeros(),
                                 trainable=True)
     super().build(input_shape)
Example #20
    def build(self, input_shape):

        self.gamma = self.add_weight(
            name="gamma", shape=input_shape[-1:], initializer=Ones(), trainable=True
        )

        self.beta = self.add_weight(
            name="beta", shape=input_shape[-1:], initializer=Zeros(), trainable=True
        )

        super(LayerNormalization, self).build(input_shape)
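The gamma/beta weights built here feed the usual normalize-then-affine step at call time. A minimal sketch of a matching call() (illustrative; the epsilon value is a hypothetical choice mirroring the 1e-6 used elsewhere on this page):

    def call(self, inputs):
        # normalize over the last axis, then rescale and shift
        mean, variance = tf.nn.moments(inputs, axes=[-1], keepdims=True)
        normalized = (inputs - mean) / tf.sqrt(variance + 1e-6)
        return self.gamma * normalized + self.beta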
Example #21
def get_discriminator(n_layers=4, use_sigmoid=True):
    input = Input(shape=(image_size, image_size, input_channel + output_channel))
    x = Conv2D(64, kernel_size=4, padding='same', strides=2, kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(input)
    x = LeakyReLU(alpha=0.2)(x)
    for i in range(1, n_layers):
        x = Conv2D(64 * 2 ** i, kernel_size=4, padding='same', strides=2, kernel_initializer=RandomNormal(
            mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(64 * 2 ** n_layers, kernel_size=4, padding='same', strides=1, kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(1, kernel_size=4, padding='same', strides=1, kernel_initializer=RandomNormal(
        mean=0.0, stddev=0.02), bias_initializer=Zeros())(x)
    if use_sigmoid:
        x = Activation('sigmoid')(x)
    discriminator = Model(inputs=input, outputs=x)
    return discriminator
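Like the generators earlier on this page, this PatchGAN-style discriminator relies on the image_size/input_channel/output_channel globals and expects the conditioning image and target image concatenated along the channel axis. A minimal usage sketch under the same hypothetical 256×256 RGB assumption:

discriminator = get_discriminator(n_layers=4, use_sigmoid=True)
discriminator.compile(optimizer='adam', loss='binary_crossentropy')
discriminator.summary()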
Example #22
 def build(self, input_shape):
     self.bn = tf.keras.layers.BatchNormalization(axis=self.axis,
                                                  epsilon=self.epsilon,
                                                  center=False,
                                                  scale=False)
     self.alphas = self.add_weight(
         shape=(input_shape[-1], ),
         initializer=Zeros(),
         dtype=tf.float32,
         name='dice_alpha')  # name='alpha_'+self.name
     super(Dice, self).build(input_shape)  # Be sure to call this somewhere!
     self.uses_learning_phase = True
Example #23
 def __init__(self,
              num_classes,
              from_logits=True,
              name='miou',
              dtype=tf.int32,
              **kwargs):
     super().__init__(name=name, dtype=dtype, **kwargs)
     self.num_classes = num_classes
     self.from_logits = from_logits
     self.total_cm = self.add_weight('total_confusion_matrix',
                                     shape=(num_classes, num_classes),
                                     initializer=Zeros(),
                                     dtype=self.dtype)
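Only the metric's __init__ is shown; total_cm is meant to accumulate a confusion matrix across batches. A plausible completion in the spirit of tf.keras.metrics.MeanIoU (an illustrative sketch, not the snippet's actual code):

 def update_state(self, y_true, y_pred, sample_weight=None):
     if self.from_logits:
         y_pred = tf.argmax(y_pred, axis=-1)
     y_true = tf.reshape(tf.cast(y_true, self.dtype), [-1])
     y_pred = tf.reshape(tf.cast(y_pred, self.dtype), [-1])
     # accumulate this batch's confusion matrix into total_cm
     cm = tf.math.confusion_matrix(y_true, y_pred,
                                   num_classes=self.num_classes,
                                   dtype=self.dtype)
     return self.total_cm.assign_add(cm)

 def result(self):
     cm = tf.cast(self.total_cm, tf.float32)
     diag = tf.linalg.diag_part(cm)
     denom = tf.reduce_sum(cm, axis=0) + tf.reduce_sum(cm, axis=1) - diag
     iou = tf.math.divide_no_nan(diag, denom)
     # average IoU over the classes that actually appear
     present = tf.cast(tf.not_equal(denom, 0), tf.float32)
     return tf.math.divide_no_nan(tf.reduce_sum(iou), tf.reduce_sum(present))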
Example #24
 def create_model_categorical(self, input_shape, output_dim):
     self.model.add(LSTM(50,
                         input_shape=input_shape,
                         kernel_initializer=RandomUniform(seed=SEED_VALUE),
                         bias_initializer=Zeros(),
                         return_sequences=True,
                         activation='relu'))
     self.model.add(LSTM(50,
                         kernel_initializer=RandomUniform(seed=SEED_VALUE),
                         bias_initializer=Zeros(),
                         # return_sequences=True,
                         activation='relu'))
     # self.model.add(Dense(20,
     #                      kernel_initializer=RandomUniform(seed=SEED_VALUE),
     #                      bias_initializer=Zeros(),
     #                      activation='relu'))
     self.model.add(Dense(output_dim,
                          kernel_initializer=RandomUniform(seed=SEED_VALUE),
                          bias_initializer=Zeros(),
                          activation='softmax'))
     print(colored('Model RNN', 'green'))
     self.model.summary()
Example #25
    def __init__(self, xdim, ydim, adim):
        super().__init__()

        self.input_output_layer = xdim + ydim + adim
        self.hidden_layer = 128
        self.shapes = [
            self.input_output_layer, self.hidden_layer, self.input_output_layer
        ]

        self.zeros = Zeros()
        self.ones = Ones()

        self.is_built = False
Example #26
 def create_model(self, input_shape, output_dim):
     self.model.add(SimpleRNN(50,
                              input_shape=input_shape,
                              kernel_initializer=RandomUniform(seed=SEED_VALUE),
                              bias_initializer=Zeros(),
                              return_sequences=True,
                              activation='relu'))
     self.model.add(SimpleRNN(50,
                              kernel_initializer=RandomUniform(seed=SEED_VALUE),
                              bias_initializer=Zeros(),
                              activation='relu', ))
     # self.model.add(Dense(10,
     #                      kernel_initializer=RandomUniform(seed=SEED_VALUE),
     #                      bias_initializer=Zeros(),
     #                      activation='relu'))
     self.model.add(Dense(output_dim,
                          kernel_initializer=RandomUniform(seed=SEED_VALUE),
                          bias_initializer=Zeros(),
                          # use_bias=False,
                          activation='relu'))
     print(colored('Model RNN', 'green'))
     self.model.summary()
Example #27
def build_fcn16s(nb_classes, target_size=(None, None)):
    inputs = Input(shape=(*target_size, 3))
    vgg = VGG16(weights='imagenet',
                include_top=False,
                input_tensor=inputs,
                input_shape=(*target_size, 3))
    x = Conv2D(4096, (7, 7), activation='relu', padding='same')(vgg.output)
    x = Dropout(0.5)(x)
    x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
    x = Dropout(0.5)(x)
    x = Conv2D(nb_classes, (1, 1),
               padding='same',
               kernel_initializer='he_normal')(x)

    x = Conv2DTranspose(nb_classes, (4, 4),
                        strides=(2, 2),
                        use_bias=False,
                        padding='same',
                        activation='relu',
                        name='fcn16s-transpose-first')(x)

    skip_con = Conv2D(nb_classes, (1, 1),
                      strides=(1, 1),
                      padding='same',
                      bias_initializer=Zeros(),
                      kernel_initializer=Zeros(),
                      name='fcn16s-skip-con')(
                          vgg.get_layer(name="block4_pool").output)
    x = Add()([x, skip_con])
    x = Conv2DTranspose(nb_classes, (32, 32),
                        strides=(16, 16),
                        use_bias=False,
                        padding='same',
                        activation='softmax',
                        name='fcn16s-transpose-second')(x)

    model = Model(inputs=inputs, outputs=x)
    return model
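A minimal usage sketch for the FCN-16s builder, assuming the hypothetical Pascal VOC setting of 21 classes and a fixed 224×224 input:

model = build_fcn16s(nb_classes=21, target_size=(224, 224))
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.summary()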
Example #28
    def __init__(self,
                 n_attention,
                 n_attention_hidden,
                 n_attention_out,
                 n_feat,
                 n_hidden,
                 activation="sigmoid",
                 concat_activity_regularizer=None,
                 kernel_initializer=VarianceScaling(distribution="uniform"),
                 kernel_regularizer='l1',
                 bias_initializer=Zeros(),
                 bias_regularizer='l1',
                 attention_initializer=VarianceScaling(distribution="uniform"),
                 attention_hidden_activation="sigmoid",
                 attention_output_activation="sigmoid",
                 attention_trainable=True,
                 attention_feat_weight_trainable=True,
                 **kwargs):
        self.n_attention = n_attention
        self.n_attention_hidden = n_attention_hidden
        self.n_attention_out = n_attention_out
        self.n_feat = n_feat
        self.n_hidden = n_hidden
        self.activation = activations.get(activation)
        self.concat_activity_regularizer = concat_activity_regularizer
        self.kernel_initializer = kernel_initializer
        self.kernel_regularizer = kernel_regularizer
        self.bias_initializer = bias_initializer
        self.bias_regularizer = bias_regularizer
        self.attention_initializer = attention_initializer
        self.attention_hidden_activation = attention_hidden_activation
        self.attention_output_activation = attention_output_activation
        self.attention_trainable = attention_trainable
        self.attention_feat_weight_trainable = attention_feat_weight_trainable

        self.concat_layer = Concatenate(
            activity_regularizer=self.concat_activity_regularizer)
        self.attention_layers = []
        for i in range(self.n_attention):
            attention_layer = DenseAttentionwFeatWeights(
                n_feat=self.n_feat,
                n_hidden=self.n_attention_hidden,
                out=self.n_attention_out,
                hidden_activation=self.attention_hidden_activation,
                output_activation=self.attention_output_activation,
                feat_weight_trainable=self.attention_feat_weight_trainable,
                initializer=self.attention_initializer,
                trainable=self.attention_trainable)
            self.attention_layers.append(attention_layer)
        super(ConcatAttentionswFeatWeights, self).__init__(**kwargs)
Example #29
    def build(self, input_shapes):

        self.neigh_weights = self.add_weight(
            shape=(self.input_dim, self.units),
            initializer=glorot_uniform(seed=self.seed),
            regularizer=l2(self.l2_reg),
            name="neigh_weights")
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=Zeros(),
                                        name='bias_weight')

        self.dropout = Dropout(self.dropout_rate)
        self.built = True
Example #30
    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 norm='bn',
                 strides=(8, 16, 32, 64, 128),
                 reg_max=16,
                 reg_topk=4,
                 reg_channels=64):
        super().__init__(1,
                         num_classes,
                         in_channels,
                         feat_channels,
                         stacked_convs,
                         centerness=False,
                         bbox_out_channels=4 * (reg_max + 1),
                         concat=False,
                         norm=norm,
                         num_levels=len(strides))
        self.strides = strides
        self.reg_max = reg_max
        self.reg_topk = reg_topk
        self.reg_channels = reg_channels

        self.reg_conf = Sequential([
            Linear(4 * (reg_topk + 1),
                   reg_channels,
                   act='relu',
                   kernel_init=RandomNormal(stddev=0.01),
                   bias_init=Zeros()),
            Linear(reg_channels,
                   1,
                   act='sigmoid',
                   kernel_init=RandomNormal(stddev=0.01),
                   bias_init=Zeros()),
        ])